Diffstat (limited to 'drivers/gpu/drm')
1305 files changed, 45539 insertions, 27146 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1cb5a4f19293..5504721007cc 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,9 +9,6 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA select DRM_PANEL_ORIENTATION_QUIRKS - select DRM_KMS_HELPER if DRM_FBDEV_EMULATION - select FB_CORE if DRM_FBDEV_EMULATION - select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION select HDMI select I2C select DMA_SHARED_BUFFER @@ -152,6 +149,7 @@ config DRM_PANIC_SCREEN config DRM_PANIC_SCREEN_QR_CODE bool "Add a panic screen with a QR code" depends on DRM_PANIC && RUST + select ZLIB_DEFLATE help This option adds a QR code generator, and a panic screen with a QR code. The QR code will contain the last lines of kmsg and other debug @@ -210,9 +208,47 @@ config DRM_DEBUG_MODESET_LOCK If in doubt, say "N". +config DRM_CLIENT + bool + depends on DRM + help + Enables support for DRM clients. DRM drivers that need + struct drm_client_dev and its interfaces should select this + option. Drivers that support the default clients should + select DRM_CLIENT_SELECTION instead. + +config DRM_CLIENT_LIB + tristate + depends on DRM + select DRM_KMS_HELPER if DRM_FBDEV_EMULATION + select FB_CORE if DRM_FBDEV_EMULATION + help + This option enables the DRM client library and selects all + modules and components according to the enabled clients. + +config DRM_CLIENT_SELECTION + tristate + depends on DRM + select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION + help + Drivers that support in-kernel DRM clients have to select this + option. + +config DRM_CLIENT_SETUP + bool + depends on DRM_CLIENT_SELECTION + help + Enables the DRM client selection. DRM drivers that support the + default clients should select DRM_CLIENT_SELECTION instead. + +menu "Supported DRM clients" + depends on DRM_CLIENT_SELECTION + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" - depends on DRM + depends on DRM_CLIENT_SELECTION + select DRM_CLIENT + select DRM_CLIENT_SETUP select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE default FB help @@ -251,6 +287,8 @@ config DRM_FBDEV_LEAK_PHYS_SMEM If in doubt, say "N" or spread the word to your closed source library vendor. 
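The Kconfig hunks above invert the old arrangement: instead of the DRM core selecting the fbdev helpers whenever DRM_FBDEV_EMULATION is set, a driver now selects DRM_CLIENT_SELECTION, which pulls in DRM_CLIENT_LIB (and through it DRM_CLIENT_SETUP plus the KMS/FB helpers) only when an in-kernel client such as fbdev emulation is actually enabled. On the driver side this pairs with a call into the new client library; the following is a minimal sketch, assuming the drm_client_setup() entry point that drm_client_lib builds from drm_client_setup.o (the function name example_start_clients and its placement are illustrative, not part of this patch):

#include <drm/drm_client_setup.h>
#include <drm/drm_device.h>

/* Hypothetical driver path, run once the DRM device is registered. */
static void example_start_clients(struct drm_device *dev)
{
	/*
	 * Starts whichever in-kernel clients were enabled in Kconfig
	 * (the fbdev client when DRM_FBDEV_EMULATION is set). A NULL
	 * format asks the helper to fall back to the driver's
	 * preferred format.
	 */
	drm_client_setup(dev, NULL);
}

Note the distinction spelled out in the help texts: DRM_CLIENT is the internal symbol for drivers that use struct drm_client_dev directly, while drivers that merely want the stock clients select DRM_CLIENT_SELECTION, as the DRM_HYPERV and amdgpu hunks below do.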
+endmenu + config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" depends on DRM @@ -320,19 +358,21 @@ config DRM_TTM_HELPER tristate depends on DRM select DRM_TTM + select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Helpers for ttm-based gem objects config DRM_GEM_DMA_HELPER tristate depends on DRM - select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION + select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Choose this if you need the GEM DMA helper functions config DRM_GEM_SHMEM_HELPER tristate depends on DRM && MMU + select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help Choose this if you need the GEM shmem helper functions @@ -472,6 +512,7 @@ source "drivers/gpu/drm/imagination/Kconfig" config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 784229d4504d..463afad1b5ca 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -34,15 +34,12 @@ endif subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror drm-y := \ - drm_aperture.o \ drm_atomic.o \ drm_atomic_uapi.o \ drm_auth.o \ drm_blend.o \ drm_bridge.o \ drm_cache.o \ - drm_client.o \ - drm_client_modeset.o \ drm_color_mgmt.o \ drm_connector.o \ drm_crtc.o \ @@ -68,6 +65,7 @@ drm-y := \ drm_prime.o \ drm_print.o \ drm_property.o \ + drm_rect.o \ drm_syncobj.o \ drm_sysfs.o \ drm_trace_points.o \ @@ -75,6 +73,10 @@ drm-y := \ drm_vblank_work.o \ drm_vma_manager.o \ drm_writeback.o +drm-$(CONFIG_DRM_CLIENT) += \ + drm_client.o \ + drm_client_event.o \ + drm_client_modeset.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o @@ -140,7 +142,6 @@ drm_kms_helper-y := \ drm_modeset_helper.o \ drm_plane_helper.o \ drm_probe_helper.o \ - drm_rect.o \ drm_self_refresh_helper.o \ drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o @@ -148,6 +149,14 @@ drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o # +# DRM clients +# + +drm_client_lib-y := drm_client_setup.o +drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o +obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o + +# # Drivers and the rest # diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 0051fb1b437f..41fa3377d9cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -5,7 +5,10 @@ config DRM_AMDGPU depends on DRM && PCI && MMU depends on !UML select FW_LOADER + select DRM_CLIENT + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index b0f95a7649bf..3a588fecb0c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -85,16 +85,9 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - - 
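This aldebaran hunk shows the pattern repeated throughout the series: the per-call-site error logging and status.hw bookkeeping move into the amdgpu_ip_block_suspend()/amdgpu_ip_block_resume() wrappers (defined further down in amdgpu_device.c), and the IP-block callbacks themselves now take a struct amdgpu_ip_block * instead of an opaque void *handle. A converted callback looks roughly like this sketch (example_ip_suspend is an illustrative name; the adev back-pointer is the one this patch adds to struct amdgpu_ip_block):

#include "amdgpu.h"

static int example_ip_suspend(struct amdgpu_ip_block *ip_block)
{
	/* The device is now reached through the block's back-pointer. */
	struct amdgpu_device *adev = ip_block->adev;

	dev_dbg(adev->dev, "suspending %s\n",
		ip_block->version->funcs->name);
	/* ... quiesce this block's hardware here ... */
	return 0;
}

On success the wrapper clears status.hw for the caller, and on failure it emits the dev_err once, which is why the converted call sites shrink to a bare return-value check.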
adev->ip_blocks[i].status.hw = false; } return 0; @@ -246,7 +239,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) dev_err(adev->dev, "Failed to get BIF handle\n"); return -EINVAL; } - r = cmn_block->version->funcs->resume(adev); + r = amdgpu_ip_block_resume(cmn_block); if (r) return r; @@ -282,15 +275,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - adev->ip_blocks[i].status.hw = true; + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) + return r; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -304,7 +292,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", @@ -417,6 +405,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = { static struct amdgpu_reset_handler *aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = { &aldebaran_mode2_handler, + &xgmi_reset_on_init_handler, }; int aldebaran_reset_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9b1e0ede05a4..d8bc6da50016 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -118,7 +118,7 @@ #define MAX_GPU_INSTANCE 64 -#define GFX_SLICE_PERIOD msecs_to_jiffies(250) +#define GFX_SLICE_PERIOD_MS 250 struct amdgpu_gpu_instance { struct amdgpu_device *adev; @@ -131,10 +131,6 @@ struct amdgpu_mgpu_info { uint32_t num_gpu; uint32_t num_dgpu; uint32_t num_apu; - - /* delayed reset_func for XGMI configuration if necessary */ - struct delayed_work delayed_reset_work; - bool pending_reset; }; enum amdgpu_ss { @@ -303,6 +299,12 @@ extern int amdgpu_wbrf; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) +/* reset mask */ +#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. 
*/ +#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */ +#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */ +#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */ + /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -365,8 +367,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); -bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, +bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); + +int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); #define AMDGPU_MAX_IP_NUM 16 @@ -389,6 +394,7 @@ struct amdgpu_ip_block_version { struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; + struct amdgpu_device *adev; }; int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, @@ -563,6 +569,7 @@ enum amd_reset_method { AMD_RESET_METHOD_MODE2, AMD_RESET_METHOD_BACO, AMD_RESET_METHOD_PCI, + AMD_RESET_METHOD_ON_INIT, }; struct amdgpu_video_codec_info { @@ -821,6 +828,24 @@ struct amdgpu_mqd { struct amdgpu_mqd_prop *p); }; +/* + * Custom Init levels could be defined for different situations where a full + * initialization of all hardware blocks are not expected. Sample cases are + * custom init sequences after resume after S0i3/S3, reset on initialization, + * partial reset of blocks etc. Presently, this defines only two levels. Levels + * are described in corresponding struct definitions - amdgpu_init_default, + * amdgpu_init_minimal_xgmi. + */ +enum amdgpu_init_lvl_id { + AMDGPU_INIT_LEVEL_DEFAULT, + AMDGPU_INIT_LEVEL_MINIMAL_XGMI, +}; + +struct amdgpu_init_level { + enum amdgpu_init_lvl_id level; + uint32_t hwini_ip_block_mask; +}; + #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 struct amdgpu_reset_domain; @@ -1092,8 +1117,6 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; - /* indicate amdgpu suspension status */ - bool suspend_complete; enum pp_mp1_state mp1_state; struct amdgpu_doorbell_index doorbell_index; @@ -1166,6 +1189,8 @@ struct amdgpu_device { bool enforce_isolation[MAX_XCP]; /* Added this mutex for cleaner shader isolation between GFX and compute processes */ struct mutex enforce_isolation_mutex; + + struct amdgpu_init_level *init_lvl; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, @@ -1261,6 +1286,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context); +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context); + int emu_soc_asic_init(struct amdgpu_device *adev); /* @@ -1443,6 +1470,8 @@ struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev); struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, struct dma_fence *gang); bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring); +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -1450,23 +1479,15 @@ void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void); 
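The init-level machinery added above is a bitmask filter over hardware init: each amdgpu_init_level carries a hwini_ip_block_mask, and a block only runs its hw_init if its AMD_IP_BLOCK_TYPE bit is set in the active level's mask, via the amdgpu_ip_member_of_hwini() check introduced in amdgpu_device.c below. A sketch of the decoding (example_in_hwini is an illustrative stand-in for that helper):

/*
 * Mirrors amdgpu_ip_member_of_hwini(): a block takes part in hw init
 * only if its type bit is set in the active level's mask. For
 * AMDGPU_INIT_LEVEL_MINIMAL_XGMI that mask covers only GMC, SMC,
 * COMMON, IH and PSP, so e.g. GFX stays uninitialized until the
 * post-reset switch back to AMDGPU_INIT_LEVEL_DEFAULT.
 */
static bool example_in_hwini(const struct amdgpu_init_level *lvl,
			     enum amd_ip_block_type block)
{
	return (lvl->hwini_ip_block_mask & BIT(block)) != 0;
}

This replaces the ad-hoc gmc.xgmi.pending_reset flag and the open-coded per-type checks that the later hunks in amdgpu_device.c delete.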
-bool amdgpu_atpx_dgpu_req_power_for_displays(void); bool amdgpu_has_atpx(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool amdgpu_is_atpx_hybrid(void) { return false; } -static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } static inline bool amdgpu_has_atpx(void) { return false; } #endif -#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void); -#else -static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } -#endif - /* * KMS */ @@ -1619,4 +1640,6 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group; extern const struct attribute_group amdgpu_gtt_mgr_attr_group; extern const struct attribute_group amdgpu_flash_attr_group; +void amdgpu_set_init_level(struct amdgpu_device *adev, + enum amdgpu_init_lvl_id lvl); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 2ca127173135..9d6345146495 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_ return -EINVAL; } - if (start + count >= max_count) + if (start + count > max_count) return -EINVAL; count = min_t(int, count, max_count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index bf6c4a0d0525..ec5e0dcf8613 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -98,9 +98,9 @@ enum { ACP_TILE_DSP2, }; -static int acp_sw_init(void *handle) +static int acp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->acp.parent = adev->dev; @@ -112,9 +112,9 @@ static int acp_sw_init(void *handle) return 0; } -static int acp_sw_fini(void *handle) +static int acp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->acp.cgs_device) amdgpu_cgs_destroy_device(adev->acp.cgs_device); @@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = { /** * acp_hw_init - start and test ACP block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int acp_hw_init(void *handle) +static int acp_hw_init(struct amdgpu_ip_block *ip_block) { int r; u64 acp_base; @@ -230,13 +230,7 @@ static int acp_hw_init(void *handle) u32 count = 0; struct i2s_platform_data *i2s_pdata = NULL; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - const struct amdgpu_ip_block *ip_block = - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); - - if (!ip_block) - return -EINVAL; + struct amdgpu_device *adev = ip_block->adev; r = amd_acp_hw_init(adev->acp.cgs_device, ip_block->version->major, ip_block->version->minor); @@ -503,14 +497,14 @@ failure: /** * acp_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int acp_hw_fini(void *handle) +static int acp_hw_fini(struct amdgpu_ip_block *ip_block) { u32 val = 0; u32 count = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* return early if no ACP */ if (!adev->acp.acp_genpd) { @@ -565,9 +559,9 @@ static int acp_hw_fini(void *handle) return 0; } -static int acp_suspend(void *handle) +static int acp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* power up on suspend */ if (!adev->acp.acp_cell) @@ -575,9 +569,9 @@ static int acp_suspend(void *handle) return 0; } -static int acp_resume(void *handle) +static int acp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* power down again on resume */ if (!adev->acp.acp_cell) @@ -585,26 +579,11 @@ static int acp_resume(void *handle) return 0; } -static int acp_early_init(void *handle) -{ - return 0; -} - static bool acp_is_idle(void *handle) { return true; } -static int acp_wait_for_idle(void *handle) -{ - return 0; -} - -static int acp_soft_reset(void *handle) -{ - return 0; -} - static int acp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -624,8 +603,6 @@ static int acp_set_powergating_state(void *handle, static const struct amd_ip_funcs acp_ip_funcs = { .name = "acp_ip", - .early_init = acp_early_init, - .late_init = NULL, .sw_init = acp_sw_init, .sw_fini = acp_sw_fini, .hw_init = acp_hw_init, @@ -633,12 +610,8 @@ static const struct amd_ip_funcs acp_ip_funcs = { .suspend = acp_suspend, .resume = acp_resume, .is_idle = acp_is_idle, - .wait_for_idle = acp_wait_for_idle, - .soft_reset = acp_soft_reset, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version acp_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 7dd55ed57c1d..b8d4e07d2043 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, return -EIO; } + kfree(info); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 4f08b153cb66..3afcd1e8aa54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -834,6 +834,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + if (!kiq_ring->sched.ready || adev->job_hang) + return 0; + ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL); if (!ring_funcs) return -ENOMEM; @@ -858,8 +861,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); - if (kiq_ring->sched.ready && !adev->job_hang) - r = amdgpu_ring_test_helper(kiq_ring); + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that unmap queues that is submitted before got + * processed successfully before returning. 
+ */ + r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); @@ -889,3 +898,27 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id) return kgd2kfd_start_sched(adev->kfd.dev, node_id); } + +/* check if there are KFD queues active */ +bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id) +{ + if (!adev->kfd.init_complete) + return false; + + return kgd2kfd_compute_active(adev->kfd.dev, node_id); +} + +/* Config CGTT_SQ_CLK_CTRL */ +int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable) +{ + int r; + + if (!adev->kfd.init_complete) + return 0; + + r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable, + reg_override_enable, perfmon_override_enable); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f9d119448442..4b80ad860639 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -266,6 +266,10 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, u32 inst); int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id); int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id); +int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); +bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id); + /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when @@ -428,6 +432,7 @@ int kgd2kfd_check_and_lock_kfd(void); void kgd2kfd_unlock_kfd(void); int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); +bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); #else static inline int kgd2kfd_init(void) { @@ -508,5 +513,10 @@ static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { return 0; } + +static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) +{ + return false; +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 9435af2e6bdc..9abf29b58ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -299,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus if (r) goto out; } else { - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 3bc0cbf45bc5..cc66ebb7bae1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * * @adev: Handle of device whose registers are to be read * @queue_idx: Index of queue in the queue-map bit-field - * @wave_cnt: Output parameter updated with number of waves in flight - * @vmid: Output parameter updated with VMID of queue whose wave count - * is being collected + * @queue_cnt: Stores the wave count and doorbell offset for an active queue * @inst: xcc's instance 
number on a multi-XCC setup */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, @@ -1133,10 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev, uint32_t low, high; uint64_t queue_addr = 0; - if (!adev->debug_exp_resets && - !adev->gfx.num_gfx_rings) - return 0; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); amdgpu_gfx_rlc_enter_safe_mode(adev, inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index fa572ba7f9fc..f30548f4c3b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2524,11 +2524,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, /* First eviction, stop the queues */ r = kgd2kfd_quiesce_mm(mni->mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR); - if (r) + + if (r && r != -ESRCH) pr_err("Failed to quiesce KFD\n"); - queue_delayed_work(system_freezable_wq, - &process_info->restore_userptr_work, - msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); + + if (r != -ESRCH) + queue_delayed_work(system_freezable_wq, + &process_info->restore_userptr_work, + msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); } mutex_unlock(&process_info->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 0c8975ac5af9..093141ad6ed0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1145,8 +1145,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, return 0; } -void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, - u32 eng_clock, u32 mem_clock) +int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, + u32 eng_clock, u32 mem_clock) { SET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); @@ -1161,8 +1161,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, if (mem_clock) args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, - sizeof(args)); + return amdgpu_atom_execute_table(adev->mode_info.atom_context, index, + (uint32_t *)&args, sizeof(args)); } void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 0811474e8fd3..0e16432d9a72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -163,8 +163,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, bool strobe_mode, struct atom_mpll_param *mpll_param); -void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, - u32 eng_clock, u32 mem_clock); +int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, + u32 eng_clock, u32 mem_clock); bool amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 375f02002579..3893e6fc2f03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void) return amdgpu_atpx_priv.atpx.is_hybrid; } -bool amdgpu_atpx_dgpu_req_power_for_displays(void) -{ - return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; 
-} - -#if defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void) -{ - return amdgpu_atpx_priv.dhandle; -} -#endif - /** * amdgpu_atpx_call - call an ATPX method * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 9da4414de617..a68338cb7b4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -2095,6 +2095,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog) amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm); + amdgpu_debugfs_jpeg_sched_mask_init(adev); + amdgpu_debugfs_gfx_sched_mask_init(adev); + amdgpu_debugfs_compute_sched_mask_init(adev); + amdgpu_debugfs_sdma_sched_mask_init(adev); + amdgpu_ras_debugfs_create_all(adev); amdgpu_rap_debugfs_init(adev); amdgpu_securedisplay_debugfs_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 5ac59b62020c..946c48829f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, struct amdgpu_coredump_info *coredump = data; struct drm_print_iterator iter; struct amdgpu_vm_fault_info *fault_info; + struct amdgpu_ip_block *ip_block; int ver; iter.data = buffer; @@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, /* dump the ip state for each ip */ drm_printf(&p, "IP Dump\n"); for (int i = 0; i < coredump->adev->num_ip_blocks; i++) { - if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) { - drm_printf(&p, "IP: %s\n", - coredump->adev->ip_blocks[i] - .version->funcs->name); - coredump->adev->ip_blocks[i] - .version->funcs->print_ip_state( - (void *)coredump->adev, &p); + ip_block = &coredump->adev->ip_blocks[i]; + if (ip_block->version->funcs->print_ip_state) { + drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name); + ip_block->version->funcs->print_ip_state(ip_block, &p); drm_printf(&p, "\n"); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c2394c8b4d6b..0171d240fcb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -25,6 +25,8 @@ * Alex Deucher * Jerome Glisse */ + +#include <linux/aperture.h> #include <linux/power_supply.h> #include <linux/kthread.h> #include <linux/module.h> @@ -35,10 +37,9 @@ #include <linux/pci-p2pdma.h> #include <linux/apple-gmux.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <linux/device.h> @@ -144,6 +145,51 @@ const char *amdgpu_asic_name[] = { "LAST", }; +#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0) +/* + * Default init level where all blocks are expected to be initialized. This is + * the level of initialization expected by default and also after a full reset + * of the device. + */ +struct amdgpu_init_level amdgpu_init_default = { + .level = AMDGPU_INIT_LEVEL_DEFAULT, + .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL, +}; + +/* + * Minimal blocks needed to be initialized before a XGMI hive can be reset. This + * is used for cases like reset on initialization where the entire hive needs to + * be reset before first use. 
+ */ +struct amdgpu_init_level amdgpu_init_minimal_xgmi = { + .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI, + .hwini_ip_block_mask = + BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) | + BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) | + BIT(AMD_IP_BLOCK_TYPE_PSP) +}; + +static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev, + enum amd_ip_block_type block) +{ + return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0; +} + +void amdgpu_set_init_level(struct amdgpu_device *adev, + enum amdgpu_init_lvl_id lvl) +{ + switch (lvl) { + case AMDGPU_INIT_LEVEL_MINIMAL_XGMI: + adev->init_lvl = &amdgpu_init_minimal_xgmi; + break; + case AMDGPU_INIT_LEVEL_DEFAULT: + fallthrough; + default: + adev->init_lvl = &amdgpu_init_default; + break; + } +} + static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); /** @@ -227,6 +273,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev) sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); } +int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (ip_block->version->funcs->suspend) { + r = ip_block->version->funcs->suspend(ip_block); + if (r) { + dev_err(ip_block->adev->dev, + "suspend of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + return r; + } + } + + ip_block->status.hw = false; + return 0; +} + +int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (ip_block->version->funcs->resume) { + r = ip_block->version->funcs->resume(ip_block); + if (r) { + dev_err(ip_block->adev->dev, + "resume of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + return r; + } + } + + ip_block->status.hw = true; + return 0; +} + /** * DOC: board_info * @@ -1655,7 +1737,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } /* Don't post if we need to reset whole hive on init */ - if (adev->gmc.xgmi.pending_reset) + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) return false; if (adev->has_hw_reset) { @@ -2159,9 +2241,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, if (!adev->ip_blocks[i].status.valid) continue; if (adev->ip_blocks[i].version->type == block_type) { - r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); - if (r) - return r; + if (adev->ip_blocks[i].version->funcs->wait_for_idle) { + r = adev->ip_blocks[i].version->funcs->wait_for_idle( + &adev->ip_blocks[i]); + if (r) + return r; + } break; } } @@ -2170,26 +2255,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, } /** - * amdgpu_device_ip_is_idle - is the hardware IP idle + * amdgpu_device_ip_is_valid - is the hardware IP enabled * * @adev: amdgpu_device pointer * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) * - * Check if the hardware IP is idle or not. - * Returns true if it the IP is idle, false if not. + * Check if the hardware IP is enable or not. + * Returns true if it the IP is enable, false if not. 
*/ -bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type) +bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) { int i; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; if (adev->ip_blocks[i].version->type == block_type) - return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); + return adev->ip_blocks[i].status.valid; } - return true; + return false; } @@ -2271,6 +2354,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, ip_block_version->funcs->name); + adev->ip_blocks[adev->num_ip_blocks].adev = adev; + adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; return 0; @@ -2566,25 +2651,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) total = true; for (i = 0; i < adev->num_ip_blocks; i++) { + ip_block = &adev->ip_blocks[i]; + if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_WARN("disabled ip block: %d <%s>\n", i, adev->ip_blocks[i].version->funcs->name); adev->ip_blocks[i].status.valid = false; - } else { - if (adev->ip_blocks[i].version->funcs->early_init) { - r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); - if (r == -ENOENT) { - adev->ip_blocks[i].status.valid = false; - } else if (r) { - DRM_ERROR("early_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - total = false; - } else { - adev->ip_blocks[i].status.valid = true; - } + } else if (ip_block->version->funcs->early_init) { + r = ip_block->version->funcs->early_init(ip_block); + if (r == -ENOENT) { + adev->ip_blocks[i].status.valid = false; + } else if (r) { + DRM_ERROR("early_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + total = false; } else { adev->ip_blocks[i].status.valid = true; } + } else { + adev->ip_blocks[i].status.valid = true; } /* get the vbios after the asic_funcs are set up */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { @@ -2633,10 +2718,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hw) continue; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -2658,7 +2746,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hw) continue; - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -2681,6 +2772,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) continue; + if (!amdgpu_ip_member_of_hwini(adev, + AMD_IP_BLOCK_TYPE_PSP)) + break; + if (!adev->ip_blocks[i].status.sw) continue; @@ -2689,22 +2784,18 @@ 
static int amdgpu_device_fw_loading(struct amdgpu_device *adev) break; if (amdgpu_in_reset(adev) || adev->in_suspend) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } } else { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } - - adev->ip_blocks[i].status.hw = true; break; } } @@ -2786,6 +2877,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) */ static int amdgpu_device_ip_init(struct amdgpu_device *adev) { + bool init_badpage; int i, r; r = amdgpu_ras_init(adev); @@ -2795,17 +2887,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; - r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); - if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - goto init_failed; + if (adev->ip_blocks[i].version->funcs->sw_init) { + r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); + if (r) { + DRM_ERROR("sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + goto init_failed; + } } adev->ip_blocks[i].status.sw = true; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { /* need to do common hw init early so everything is set up for gmc */ - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2822,7 +2920,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); goto init_failed; } - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2895,7 +2993,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) * Note: theoretically, this should be called before all vram allocations * to protect retired page from abusing */ - r = amdgpu_ras_recovery_init(adev); + init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); + r = amdgpu_ras_recovery_init(adev, init_badpage); if (r) goto init_failed; @@ -2935,7 +3034,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, true); /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) { + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); } @@ -3135,7 +3234,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->funcs->late_init) { - r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("late_init of IP block <%s> failed %d\n", 
adev->ip_blocks[i].version->funcs->name, r); @@ -3206,6 +3305,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } +static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) +{ + int r; + + if (!ip_block->version->funcs->hw_fini) { + DRM_ERROR("hw_fini of IP block <%s> not defined\n", + ip_block->version->funcs->name); + } else { + r = ip_block->version->funcs->hw_fini(ip_block); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); + } + } + + ip_block->status.hw = false; +} + /** * amdgpu_device_smu_fini_early - smu hw_fini wrapper * @@ -3215,7 +3333,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) */ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) { - int i, r; + int i; if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) return; @@ -3224,13 +3342,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - adev->ip_blocks[i].status.hw = false; + amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); break; } } @@ -3244,7 +3356,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].version->funcs->early_fini) continue; - r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); + r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); if (r) { DRM_DEBUG("early_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -3263,14 +3375,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - - adev->ip_blocks[i].status.hw = false; + amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); } if (amdgpu_sriov_vf(adev)) { @@ -3316,12 +3421,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ib_pool_fini(adev); amdgpu_seq64_fini(adev); } - - r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + if (adev->ip_blocks[i].version->funcs->sw_fini) { + r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } } adev->ip_blocks[i].status.sw = false; adev->ip_blocks[i].status.valid = false; @@ -3331,7 +3437,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.late_initialized) continue; if (adev->ip_blocks[i].version->funcs->late_fini) - adev->ip_blocks[i].version->funcs->late_fini((void *)adev); + adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); adev->ip_blocks[i].status.late_initialized = false; } @@ -3403,15 +3509,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) continue; /* XXX handle errors */ - r = adev->ip_blocks[i].version->funcs->suspend(adev); - /* XXX handle errors */ - if 
(r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = false; } return 0; @@ -3449,14 +3549,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } /* skip unnecessary suspend if we do not initialize them yet */ - if (adev->gmc.xgmi.pending_reset && - !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { - adev->ip_blocks[i].status.hw = false; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) continue; - } /* skip suspend of gfx/mes and psp for S0ix * gfx is in gfxoff state, so on resume it will exit gfxoff just @@ -3490,13 +3585,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) continue; /* XXX handle errors */ - r = adev->ip_blocks[i].version->funcs->suspend(adev); - /* XXX handle errors */ - if (r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); adev->ip_blocks[i].status.hw = false; + /* handle putting the SMC in the appropriate state */ if (!amdgpu_sriov_vf(adev)) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { @@ -3570,7 +3661,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) !block->status.valid) continue; - r = block->version->funcs->hw_init(adev); + r = block->version->funcs->hw_init(&adev->ip_blocks[i]); DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; @@ -3609,15 +3700,19 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block->status.hw) continue; - if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) - r = block->version->funcs->resume(adev); - else - r = block->version->funcs->hw_init(adev); - - DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); - if (r) - return r; - block->status.hw = true; + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) + return r; + } else { + r = block->version->funcs->hw_init(&adev->ip_blocks[i]); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + block->status.hw = true; + } } } @@ -3648,13 +3743,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = true; } } @@ -3686,13 +3777,9 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = 
amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = true; } return 0; @@ -4149,7 +4236,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, * for throttling interrupt) = 60 seconds. */ ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); + ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1); + ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); + ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE); /* Registers mapping */ /* TODO: block userspace mapping of io register */ @@ -4193,13 +4283,19 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_set_mcbp(adev); + /* + * By default, use default mode where all blocks are expected to be + * initialized. At present a 'swinit' of blocks is required to be + * completed before the need for a different level is detected. + */ + amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT); /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) return r; /* Get rid of things like offb */ - r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); + r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name); if (r) return r; @@ -4265,20 +4361,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { if (adev->gmc.xgmi.num_physical_nodes) { dev_info(adev->dev, "Pending hive reset.\n"); - adev->gmc.xgmi.pending_reset = true; - /* Only need to init necessary block for SMU to handle the reset */ - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; - if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { - DRM_DEBUG("IP %s disabled for hw_init.\n", - adev->ip_blocks[i].version->funcs->name); - adev->ip_blocks[i].status.hw = true; - } - } + amdgpu_set_init_level(adev, + AMDGPU_INIT_LEVEL_MINIMAL_XGMI); } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && !amdgpu_device_has_display_hardware(adev)) { r = psp_gpu_reset(adev); @@ -4386,7 +4470,7 @@ fence_driver_init: /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. 
*/ - if (!adev->gmc.xgmi.pending_reset) { + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { r = amdgpu_device_ip_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); @@ -4436,6 +4520,7 @@ fence_driver_init: amdgpu_fru_sysfs_init(adev); amdgpu_reg_state_sysfs_init(adev); + amdgpu_xcp_cfg_sysfs_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4463,9 +4548,8 @@ fence_driver_init: if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (adev->gmc.xgmi.pending_reset) - queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) + amdgpu_xgmi_reset_on_init(adev); amdgpu_device_check_iommu_direct_map(adev); @@ -4559,6 +4643,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_fru_sysfs_fini(adev); amdgpu_reg_state_sysfs_fini(adev); + amdgpu_xcp_cfg_sysfs_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -4694,7 +4779,7 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; if (!adev->ip_blocks[i].version->funcs->prepare_suspend) continue; - r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); + r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); if (r) goto unprepare; } @@ -4711,13 +4796,13 @@ unprepare: * amdgpu_device_suspend - initiate device suspend * * @dev: drm dev pointer - * @fbcon : notify the fbdev of suspend + * @notify_clients: notify in-kernel DRM clients * * Puts the hw in the suspend state (all asics). * Returns 0 for success or an error on failure. * Called at driver suspend. */ -int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; @@ -4737,8 +4822,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) DRM_WARN("smart shift update failed\n"); - if (fbcon) - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); + if (notify_clients) + drm_client_dev_suspend(adev_to_drm(adev), false); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -4773,13 +4858,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) * amdgpu_device_resume - initiate device resume * * @dev: drm dev pointer - * @fbcon : notify the fbdev of resume + * @notify_clients: notify in-kernel DRM clients * * Bring the hw back to operating state (all asics). * Returns 0 for success or an error on failure. * Called at driver resume. 
*/ -int amdgpu_device_resume(struct drm_device *dev, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; @@ -4835,8 +4920,8 @@ exit: /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - if (fbcon) - drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); + if (notify_clients) + drm_client_dev_resume(adev_to_drm(adev), false); amdgpu_ras_resume(adev); @@ -4898,7 +4983,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->funcs->check_soft_reset) adev->ip_blocks[i].status.hang = - adev->ip_blocks[i].version->funcs->check_soft_reset(adev); + adev->ip_blocks[i].version->funcs->check_soft_reset( + &adev->ip_blocks[i]); if (adev->ip_blocks[i].status.hang) { dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); asic_hang = true; @@ -4927,7 +5013,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->pre_soft_reset) { - r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -4989,7 +5075,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->soft_reset) { - r = adev->ip_blocks[i].version->funcs->soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5018,7 +5104,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].status.hang && adev->ip_blocks[i].version->funcs->post_soft_reset) - r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); + r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); if (r) return r; } @@ -5103,6 +5189,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); + + amdgpu_virt_ras_telemetry_post_reset(adev); + return 0; } @@ -5309,7 +5398,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < tmp_adev->num_ip_blocks; i++) if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) tmp_adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)tmp_adev); + ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); } @@ -5325,74 +5414,25 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -int amdgpu_do_asic_reset(struct list_head *device_list_handle, - struct amdgpu_reset_context *reset_context) +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) { - struct amdgpu_device *tmp_adev = NULL; - bool need_full_reset, skip_hw_reset, vram_lost = false; - int r = 0; - - /* Try reset handler method first */ - tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); - - reset_context->reset_device_list = device_list_handle; - r = amdgpu_reset_perform_reset(tmp_adev, reset_context); - /* If reset handler not implemented, continue; otherwise return */ - if (r == -EOPNOTSUPP) - r = 0; - else - return r; - - /* Reset handler not implemented, use the default method */ - need_full_reset = - 
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); - - /* - * ASIC reset has to be done on all XGMI hive nodes ASAP - * to allow proper links negotiation in FW (within 1 sec) - */ - if (!skip_hw_reset && need_full_reset) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* For XGMI run all resets in parallel to speed up the process */ - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - tmp_adev->gmc.xgmi.pending_reset = false; - if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) - r = -EALREADY; - } else - r = amdgpu_asic_reset(tmp_adev); + struct list_head *device_list_handle; + bool full_reset, vram_lost = false; + struct amdgpu_device *tmp_adev; + int r; - if (r) { - dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", - r, adev_to_drm(tmp_adev)->unique); - goto out; - } - } + device_list_handle = reset_context->reset_device_list; - /* For XGMI wait for all resets to complete before proceed */ - if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - flush_work(&tmp_adev->xgmi_reset_work); - r = tmp_adev->asic_reset_res; - if (r) - break; - } - } - } - } + if (!device_list_handle) + return -EINVAL; - if (!r && amdgpu_ras_intr_triggered()) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); - } - - amdgpu_ras_intr_cleared(); - } + full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + r = 0; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (need_full_reset) { + /* After reset, it's default init level */ + amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT); + if (full_reset) { /* post card */ amdgpu_ras_set_fed(tmp_adev, false); r = amdgpu_device_asic_init(tmp_adev); @@ -5448,7 +5488,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); + drm_client_dev_resume(adev_to_drm(tmp_adev), false); /* * The GPU enters bad state once faulty pages @@ -5482,7 +5522,6 @@ out: r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); - need_full_reset = true; r = -EAGAIN; goto end; } @@ -5493,10 +5532,85 @@ out: } end: - if (need_full_reset) + return r; +} + +int amdgpu_do_asic_reset(struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + bool need_full_reset, skip_hw_reset; + int r = 0; + + /* Try reset handler method first */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + + reset_context->reset_device_list = device_list_handle; + r = amdgpu_reset_perform_reset(tmp_adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -EOPNOTSUPP) + r = 0; + else + return r; + + /* Reset handler not implemented, use the default method */ + need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + + /* + * ASIC reset has to be done on all XGMI hive nodes ASAP + * to allow proper links negotiation in FW (within 1 sec) + */ + if (!skip_hw_reset && need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* For XGMI run all resets in parallel to speed up 
the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_unbound_wq, + &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + } else + r = amdgpu_asic_reset(tmp_adev); + + if (r) { + dev_err(tmp_adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); + goto out; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, device_list_handle, + reset_list) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + } + + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + amdgpu_ras_reset_error_count(tmp_adev, + AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_intr_cleared(); + } + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r == -EAGAIN) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); else clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + +out: return r; } @@ -5734,7 +5848,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); + drm_client_dev_suspend(adev_to_drm(tmp_adev), false); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5824,7 +5938,7 @@ skip_hw_reset: if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) @@ -6092,6 +6206,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); + if (!p2p_access) + dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", + pci_name(peer_adev->pdev)); bool is_large_bar = adev->gmc.visible_vram_size && adev->gmc.real_vram_size == adev->gmc.visible_vram_size; @@ -6331,7 +6448,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } amdgpu_device_unset_mp1_state(adev); @@ -6344,6 +6461,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); int r; + if (amdgpu_sriov_vf(adev)) + return false; + r = pci_save_state(pdev); if (!r) { kfree(adev->pci_state); @@ -6604,3 +6724,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, } return ret; } + +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring) +{ + ssize_t size = 0; + + if (!ring || !ring->adev) + return size; + + if (amdgpu_device_should_recover_gpu(ring->adev)) + size |= AMDGPU_RESET_TYPE_FULL; + + if (unlikely(!ring->adev->debug_disable_soft_recovery) && + !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) + size |= AMDGPU_RESET_TYPE_SOFT_RESET; + + return size; +} + +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset) +{ + ssize_t size = 0; + + if (supported_reset == 0) { + size += sysfs_emit_at(buf, size, "unsupported"); + size += sysfs_emit_at(buf, size, "\n"); + return size; + + } + + if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET) + size += sysfs_emit_at(buf, size, "soft "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + size += sysfs_emit_at(buf, size, "queue "); + + if (supported_reset & 
AMDGPU_RESET_TYPE_PER_PIPE) + size += sysfs_emit_at(buf, size, "pipe "); + + if (supported_reset & AMDGPU_RESET_TYPE_FULL) + size += sysfs_emit_at(buf, size, "full "); + + size += sysfs_emit_at(buf, size, "\n"); + return size; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 4bd61c169ca8..1040204ac8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1723,45 +1723,85 @@ union nps_info { struct nps_info_v1_0 v1; }; +static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev, + union nps_info *nps_data) +{ + uint64_t vram_size, pos, offset; + struct nps_info_header *nhdr; + struct binary_header bhdr; + uint16_t checksum; + + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; + pos = vram_size - DISCOVERY_TMR_OFFSET; + amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false); + + offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset); + checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum); + + amdgpu_device_vram_access(adev, (pos + offset), nps_data, + sizeof(*nps_data), false); + + nhdr = (struct nps_info_header *)(nps_data); + if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data, + le32_to_cpu(nhdr->size_bytes), + checksum)) { + dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); + return -EINVAL; + } + + return 0; +} + int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt) + int *range_cnt, bool refresh) { struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; + union nps_info nps_data; u16 offset; - int i; + int i, r; if (!nps_type || !range_cnt || !ranges) return -EINVAL; - if (!adev->mman.discovery_bin) { - dev_err(adev->dev, - "fetch mem range failed, ip discovery uninitialized\n"); - return -EINVAL; - } + if (refresh) { + r = amdgpu_discovery_refresh_nps_info(adev, &nps_data); + if (r) + return r; + nps_info = &nps_data; + } else { + if (!adev->mman.discovery_bin) { + dev_err(adev->dev, + "fetch mem range failed, ip discovery uninitialized\n"); + return -EINVAL; + } - bhdr = (struct binary_header *)adev->mman.discovery_bin; - offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); + bhdr = (struct binary_header *)adev->mman.discovery_bin; + offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); - if (!offset) - return -ENOENT; + if (!offset) + return -ENOENT; - /* If verification fails, return as if NPS table doesn't exist */ - if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) - return -ENOENT; + /* If verification fails, return as if NPS table doesn't exist */ + if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) + return -ENOENT; - nps_info = (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = + (union nps_info *)(adev->mman.discovery_bin + offset); + } switch (le16_to_cpu(nps_info->v1.header.version_major)) { case 1: + mem_ranges = kvcalloc(nps_info->v1.count, + sizeof(*mem_ranges), + GFP_KERNEL); + if (!mem_ranges) + return -ENOMEM; *nps_type = nps_info->v1.nps_type; *range_cnt = nps_info->v1.count; - mem_ranges = kvzalloc( - *range_cnt * sizeof(struct amdgpu_gmc_memrange), - GFP_KERNEL); for (i = 0; i < *range_cnt; i++) { mem_ranges[i].base_address = nps_info->v1.instance_info[i].base_address; @@ -2492,6 +2532,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); adev->ip_versions[UVD_HWIP][0] = 
IP_VERSION(1, 0, 1); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } else { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); @@ -2508,6 +2549,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); + adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0); } break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index f5d36525ec3e..b44d56465c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, uint32_t *nps_type, struct amdgpu_gmc_memrange **ranges, - int *range_cnt); + int *range_cnt, bool refresh); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 81d9877c8735..38686203bea6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -23,6 +23,7 @@ */ #include <drm/amdgpu_drm.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem.h> @@ -231,8 +232,6 @@ int amdgpu_wbrf = -1; int amdgpu_damage_clips = -1; /* auto */ int amdgpu_umsch_mm_fwlog; -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); - DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, "DRM_UT_CORE", "DRM_UT_DRIVER", @@ -247,9 +246,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), - .delayed_reset_work = __DELAYED_WORK_INITIALIZER( - mgpu_info.delayed_reset_work, - amdgpu_drv_delayed_reset_work_handler, 0), }; int amdgpu_ras_enable = -1; uint amdgpu_ras_mask = 0xffffffff; @@ -892,7 +888,7 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444); * the ABM algorithm, with 1 being the least reduction and 4 being the most * reduction. * - * Defaults to -1, or disabled. Userspace can only override this level after + * Defaults to -1, or auto. Userspace can only override this level after * boot if it's set to auto. 
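+ * (A value of 0 disables ABM entirely.)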
*/ int amdgpu_dm_abm_level = -1; @@ -2365,11 +2361,15 @@ retry_init: */ if (adev->mode_info.mode_config_initialized && !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) { + const struct drm_format_info *format; + /* select 8 bpp console on low vram cards */ if (adev->gmc.real_vram_size <= (32*1024*1024)) - drm_fbdev_ttm_setup(adev_to_drm(adev), 8); + format = drm_format_info(DRM_FORMAT_C8); else - drm_fbdev_ttm_setup(adev_to_drm(adev), 32); + format = NULL; + + drm_client_setup(adev_to_drm(adev), format); } ret = amdgpu_debugfs_init(adev); @@ -2434,6 +2434,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); amdgpu_xcp_dev_unplug(adev); + amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { @@ -2472,82 +2473,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) adev->mp1_state = PP_MP1_STATE_NONE; } -/** - * amdgpu_drv_delayed_reset_work_handler - work handler for reset - * - * @work: work_struct. - */ -static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) -{ - struct list_head device_list; - struct amdgpu_device *adev; - int i, r; - struct amdgpu_reset_context reset_context; - - memset(&reset_context, 0, sizeof(reset_context)); - - mutex_lock(&mgpu_info.mutex); - if (mgpu_info.pending_reset == true) { - mutex_unlock(&mgpu_info.mutex); - return; - } - mgpu_info.pending_reset = true; - mutex_unlock(&mgpu_info.mutex); - - /* Use a common context, just need to make sure full reset is done */ - reset_context.method = AMD_RESET_METHOD_NONE; - set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - reset_context.reset_req_dev = adev; - r = amdgpu_device_pre_asic_reset(adev, &reset_context); - if (r) { - dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", - r, adev_to_drm(adev)->unique); - } - if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work)) - r = -EALREADY; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - flush_work(&adev->xgmi_reset_work); - adev->gmc.xgmi.pending_reset = false; - } - - /* reset function will rebuild the xgmi hive info , clear it now */ - for (i = 0; i < mgpu_info.num_dgpu; i++) - amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev); - - INIT_LIST_HEAD(&device_list); - - for (i = 0; i < mgpu_info.num_dgpu; i++) - list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list); - - /* unregister the GPU first, reset function will add them back */ - list_for_each_entry(adev, &device_list, reset_list) - amdgpu_unregister_gpu_instance(adev); - - /* Use a common context, just need to make sure full reset is done */ - set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); - r = amdgpu_do_asic_reset(&device_list, &reset_context); - - if (r) { - DRM_ERROR("reinit gpus failure"); - return; - } - for (i = 0; i < mgpu_info.num_dgpu; i++) { - adev = mgpu_info.gpu_ins[i].adev; - if (!adev->kfd.init_complete) { - kgd2kfd_init_zone_device(adev); - amdgpu_amdkfd_device_init(adev); - amdgpu_amdkfd_drm_client_create(adev); - } - amdgpu_ttm_set_buffer_funcs_status(adev, true); - } -} - static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); @@ -2580,7 +2505,6 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - 
adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2595,7 +2519,6 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); @@ -2982,6 +2905,7 @@ static const struct drm_driver amdgpu_kms_driver = { .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, #ifdef CONFIG_PROC_FS @@ -3008,6 +2932,7 @@ const struct drm_driver amdgpu_partition_driver = { .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, @@ -3068,6 +2993,12 @@ static int __init amdgpu_init(void) /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); + if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) { + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_crit("Overdrive is enabled, please disable it before " + "reporting any bugs unrelated to overdrive.\n"); + } + /* let modprobe override vga console setting */ return pci_register_driver(&amdgpu_kms_pci_driver); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c index 35fee3e8cde2..8cd69836dd99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -200,7 +200,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, dev_err_ratelimited(&i2c_adap->dev, "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", eeprom_addr, buf_size, - read ? 
"read" : "write", EEPROM_OFFSET_SIZE); + str_read_write(read), EEPROM_OFFSET_SIZE); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index c7df7fa3459f..df2cf5c33925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> +#include <drm/drm_file.h> #include "amdgpu.h" #include "amdgpu_vm.h" @@ -59,18 +60,25 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_mem_stats stats; + struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { }; ktime_t usage[AMDGPU_HW_IP_NUM]; - unsigned int hw_ip; + const char *pl_name[] = { + [TTM_PL_VRAM] = "vram", + [TTM_PL_TT] = "gtt", + [TTM_PL_SYSTEM] = "cpu", + [AMDGPU_PL_GDS] = "gds", + [AMDGPU_PL_GWS] = "gws", + [AMDGPU_PL_OA] = "oa", + [AMDGPU_PL_DOORBELL] = "doorbell", + }; + unsigned int hw_ip, i; int ret; - memset(&stats, 0, sizeof(stats)); - ret = amdgpu_bo_reserve(vm->root.bo, false); if (ret) return; - amdgpu_vm_get_memory(vm, &stats); + amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats)); amdgpu_bo_unreserve(vm->root.bo); amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage); @@ -82,24 +90,33 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) */ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); - drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); - drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); - drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); - drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n", - stats.visible_vram/1024UL); + + for (i = 0; i < ARRAY_SIZE(pl_name); i++) { + if (!pl_name[i]) + continue; + + drm_print_memory_stats(p, + &stats[i].drm, + DRM_GEM_OBJECT_RESIDENT | + DRM_GEM_OBJECT_PURGEABLE, + pl_name[i]); + } + + /* Legacy amdgpu keys, alias to drm-resident-memory-: */ + drm_printf(p, "drm-memory-vram:\t%llu KiB\n", + stats[TTM_PL_VRAM].drm.resident/1024UL); + drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", + stats[TTM_PL_TT].drm.resident/1024UL); + drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", + stats[TTM_PL_SYSTEM].drm.resident/1024UL); + + /* Amdgpu specific memory accounting keys: */ drm_printf(p, "amd-evicted-vram:\t%llu KiB\n", - stats.evicted_vram/1024UL); - drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n", - stats.evicted_visible_vram/1024UL); + stats[TTM_PL_VRAM].evicted/1024UL); drm_printf(p, "amd-requested-vram:\t%llu KiB\n", - stats.requested_vram/1024UL); - drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n", - stats.requested_visible_vram/1024UL); + stats[TTM_PL_VRAM].requested/1024UL); drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", - stats.requested_gtt/1024UL); - drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL); - drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL); - drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL); + stats[TTM_PL_TT].requested/1024UL); for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 256b95232de5..b2033f8352f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -78,8 +78,9 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) if 
(adev->dummy_page_addr) return 0; - adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); + adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) { dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); adev->dummy_page_addr = 0; @@ -99,8 +100,9 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) { if (!adev->dummy_page_addr) return; - dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); adev->dummy_page_addr = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f1ffab5a1eae..f57cc72c43cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -87,16 +87,6 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, return bit; } -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue) -{ - *queue = bit % adev->gfx.me.num_queue_per_pipe; - *pipe = (bit / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; - *me = (bit / adev->gfx.me.num_queue_per_pipe) - / adev->gfx.me.num_pipe_per_me; -} - bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue) { @@ -415,7 +405,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL); if (!kiq->mqd_backup) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); @@ -438,7 +428,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.me.mqd_backup[i]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -462,7 +452,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[j]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); return -ENOMEM; @@ -525,6 +515,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev)) + return 0; + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { @@ -538,20 +531,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.compute_ring[j], RESET_QUEUES, 0, 0); } - - /** - * This is workaround: only skip kiq_ring test - * during ras recovery in suspend stage for gfx9.4.3 + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that unmap queues that is submitted before got + * processed successfully before returning. 
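+ * (The KIQ consumes packets strictly in order, so a passing
+ * scratch-register test implies the unmap packets queued ahead
+ * of it were processed as well.)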
*/ - if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) && - amdgpu_ras_in_recovery(adev)) { - spin_unlock(&kiq->ring_lock); - return 0; - } + r = amdgpu_ring_test_helper(kiq_ring); - if (kiq_ring->sched.ready && !adev->job_hang) - r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); return r; @@ -579,8 +567,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&kiq->ring_lock); + if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang) + return 0; + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_gfx_rings)) { spin_unlock(&kiq->ring_lock); @@ -593,11 +584,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j], PREEMPT_QUEUES, 0, 0); } - } + /* Submit unmap queue packet */ + amdgpu_ring_commit(kiq_ring); - if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + /* + * Ring test will do a basic scratch register change check. + * Just run this to ensure that unmap queues that is submitted + * before got processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&kiq->ring_lock); + spin_unlock(&kiq->ring_lock); + } return r; } @@ -702,7 +699,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[j]); } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that map queues that is submitted before got + * processed successfully before returning. + */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -753,7 +756,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) &adev->gfx.gfx_ring[j]); } } - + /* Submit map queue packet */ + amdgpu_ring_commit(kiq_ring); + /* + * Ring test will do a basic scratch register change check. Just run + * this to ensure that map queues that is submitted before got + * processed successfully before returning. 
+ */ r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) @@ -895,6 +904,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (adev->gfx.cp_ecc_error_irq.funcs) { r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) @@ -1363,35 +1375,35 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return count; } +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, struct device_attribute *addr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - char *supported_partition; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + int size = 0, mode; + char *sep = ""; - /* TBD */ - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - supported_partition = "SPX, DPX, QPX, CPX"; - break; - case 6: - supported_partition = "SPX, TPX, CPX"; - break; - case 4: - supported_partition = "SPX, DPX, CPX"; - break; - /* this seems only existing in emulation phase */ - case 2: - supported_partition = "SPX, CPX"; - break; - default: - supported_partition = "Not supported"; - break; + if (!xcp_mgr || !xcp_mgr->avail_xcp_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_mgr->avail_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; } - return sysfs_emit(buf, "%s\n", supported_partition); + size += sysfs_emit_at(buf, size, "\n"); + + return size; } static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) @@ -1586,9 +1598,11 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, if (adev->enforce_isolation[i] && !partition_values[i]) { /* Going from enabled to disabled */ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, false); } else if (!adev->enforce_isolation[i] && partition_values[i]) { /* Going from disabled to enabled */ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); + amdgpu_mes_set_enforce_isolation(adev, i, true); } adev->enforce_isolation[i] = partition_values[i]; } @@ -1598,6 +1612,32 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, return count; } +static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset); +} + +static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset); +} + static DEVICE_ATTR(run_cleaner_shader, 0200, NULL, amdgpu_gfx_set_run_cleaner_shader); @@ -1611,45 +1651,136 @@ static DEVICE_ATTR(current_compute_partition, 0644, static DEVICE_ATTR(available_compute_partition, 0444, amdgpu_gfx_get_available_compute_partition, NULL); +static DEVICE_ATTR(gfx_reset_mask, 0444, + amdgpu_gfx_get_gfx_reset_mask, NULL); -int amdgpu_gfx_sysfs_init(struct amdgpu_device 
*adev) +static DEVICE_ATTR(compute_reset_mask, 0444, + amdgpu_gfx_get_compute_reset_mask, NULL); + +static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; int r; + if (!xcp_mgr) + return 0; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); + + if (!xcp_switch_supported) + dev_attr_current_compute_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + r = device_create_file(adev->dev, &dev_attr_current_compute_partition); if (r) return r; - r = device_create_file(adev->dev, &dev_attr_available_compute_partition); + if (xcp_switch_supported) + r = device_create_file(adev->dev, + &dev_attr_available_compute_partition); return r; } -void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev) { + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + bool xcp_switch_supported; + + if (!xcp_mgr) + return; + + xcp_switch_supported = + (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode); device_remove_file(adev->dev, &dev_attr_current_compute_partition); - device_remove_file(adev->dev, &dev_attr_available_compute_partition); + + if (xcp_switch_supported) + device_remove_file(adev->dev, + &dev_attr_available_compute_partition); } -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) +static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) { int r; r = device_create_file(adev->dev, &dev_attr_enforce_isolation); if (r) return r; + if (adev->gfx.enable_cleaner_shader) + r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); - r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); - if (r) + return r; +} + +static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_enforce_isolation); + if (adev->gfx.enable_cleaner_shader) + device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); +} + +static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) return r; - return 0; + if (adev->gfx.num_gfx_rings) { + r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask); + if (r) + return r; + } + + if (adev->gfx.num_compute_rings) { + r = device_create_file(adev->dev, &dev_attr_compute_reset_mask); + if (r) + return r; + } + + return r; } -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) +static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev) { - device_remove_file(adev->dev, &dev_attr_enforce_isolation); - device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); + if (!amdgpu_gpu_recovery) + return; + + if (adev->gfx.num_gfx_rings) + device_remove_file(adev->dev, &dev_attr_gfx_reset_mask); + + if (adev->gfx.num_compute_rings) + device_remove_file(adev->dev, &dev_attr_compute_reset_mask); +} + +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_gfx_sysfs_xcp_init(adev); + if (r) { + dev_err(adev->dev, "failed to create xcp sysfs files"); + return r; + } + + r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + if (r) + dev_err(adev->dev, "failed to create isolation sysfs files"); + + r = amdgpu_gfx_sysfs_reset_mask_init(adev); + if (r) + dev_err(adev->dev, "failed to create reset mask sysfs files"); + + return r; +} + +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +{ + amdgpu_gfx_sysfs_xcp_fini(adev); + amdgpu_gfx_sysfs_isolation_shader_fini(adev); + 
amdgpu_gfx_sysfs_reset_mask_fini(adev); } int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, @@ -1737,7 +1868,7 @@ static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, if (adev->gfx.kfd_sch_req_count[idx] == 0 && adev->gfx.kfd_sch_inactive[idx]) { schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); } } else { if (adev->gfx.kfd_sch_req_count[idx] == 0) { @@ -1792,8 +1923,9 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]); } if (fences) { + /* we've already had our timeslice, so let's wrap this up */ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, - GFX_SLICE_PERIOD); + msecs_to_jiffies(1)); } else { /* Tell KFD to resume the runqueue */ if (adev->kfd.init_complete) { @@ -1806,6 +1938,51 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) mutex_unlock(&adev->enforce_isolation_mutex); } +static void +amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev, + u32 idx) +{ + unsigned long cjiffies; + bool wait = false; + + mutex_lock(&adev->enforce_isolation_mutex); + if (adev->enforce_isolation[idx]) { + /* set the initial values if nothing is set */ + if (!adev->gfx.enforce_isolation_jiffies[idx]) { + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + /* Make sure KFD gets a chance to run */ + if (amdgpu_amdkfd_compute_active(adev, idx)) { + cjiffies = jiffies; + if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) { + cjiffies -= adev->gfx.enforce_isolation_jiffies[idx]; + if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) { + /* if our time is up, let KGD work drain before scheduling more */ + wait = true; + /* reset the timer period */ + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } else { + /* set the timer period to what's left in our time slice */ + adev->gfx.enforce_isolation_time[idx] = + GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies); + } + } else { + /* if jiffies wrap around we will just wait a little longer */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + } + } else { + /* if there is no KFD work, then set the full slice period */ + adev->gfx.enforce_isolation_jiffies[idx] = jiffies; + adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS; + } + } + mutex_unlock(&adev->enforce_isolation_mutex); + + if (wait) + msleep(GFX_SLICE_PERIOD_MS); +} + void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1822,6 +1999,9 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) if (idx >= MAX_XCP) return; + /* Don't submit more work until KFD has had some time */ + amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx); + mutex_lock(&adev->enforce_isolation_mutex); if (adev->enforce_isolation[idx]) { if (adev->kfd.init_complete) @@ -1853,3 +2033,144 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) } mutex_unlock(&adev->enforce_isolation_mutex); } + +/* + * debugfs for to enable/disable gfx job submission to specific core. 
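+ * Bit i of the written mask corresponds to gfx ring i. A write that
+ * would leave no valid bit set is rejected with -EINVAL, so at least
+ * one gfx ring always remains schedulable.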
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_gfx_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { + ring = &adev->gfx.gfx_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops, + amdgpu_debugfs_gfx_sched_mask_get, + amdgpu_debugfs_gfx_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_gfx_rings > 1)) + return; + sprintf(name, "amdgpu_gfx_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_gfx_sched_mask_fops); +#endif +} + +/* + * debugfs for to enable/disable compute job submission to specific core. + */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->gfx.num_compute_rings) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->gfx.num_compute_rings; ++i) { + ring = &adev->gfx.compute_ring[i]; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops, + amdgpu_debugfs_compute_sched_mask_get, + amdgpu_debugfs_compute_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->gfx.num_compute_rings > 1)) + return; + sprintf(name, "amdgpu_compute_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_compute_sched_mask_fops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5644e10a86a9..8b5bd63b5773 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -424,6 +424,8 @@ struct amdgpu_gfx { /* reset 
mask */ uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; + uint32_t gfx_supported_reset; + uint32_t compute_supported_reset; /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ @@ -472,6 +474,8 @@ struct amdgpu_gfx { struct mutex kfd_sch_mutex; u64 kfd_sch_req_count[MAX_XCP]; bool kfd_sch_inactive[MAX_XCP]; + unsigned long enforce_isolation_jiffies[MAX_XCP]; + unsigned long enforce_isolation_time[MAX_XCP]; }; struct amdgpu_gfx_ras_reg_entry { @@ -540,8 +544,6 @@ bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); -void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, - int *me, int *pipe, int *queue); bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); @@ -579,11 +581,11 @@ void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev); void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, unsigned int cleaner_shader_size, const void *cleaner_shader_ptr); -int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev); -void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev); void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work); void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); +void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); +void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 17a19d49d30a..1c19a65e6553 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -1065,18 +1065,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo)); } -/** - * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address - * from CPU's view - * - * @adev: amdgpu_device pointer - * @bo: amdgpu buffer object - */ -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) -{ - return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; -} - int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) { struct amdgpu_bo *vram_bo = NULL; @@ -1130,6 +1118,79 @@ release_buffer: return ret; } +static const char *nps_desc[] = { + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +static ssize_t available_memory_partition_show(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int size = 0, mode; + char *sep = ""; + + for_each_inst(mode, adev->gmc.supported_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t current_memory_partition_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_device *ddev = 
dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + struct amdgpu_hive_info *hive; + int i; + + mode = UNKNOWN_MEMORY_PARTITION_MODE; + for_each_inst(i, adev->gmc.supported_nps_modes) { + if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) { + mode = i; + break; + } + } + + if (mode == UNKNOWN_MEMORY_PARTITION_MODE) + return -EINVAL; + + if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) { + dev_info( + adev->dev, + "requested NPS mode is same as current NPS mode, skipping\n"); + return count; + } + + /* If device is part of hive, all devices in the hive should request the + * same mode. Hence store the requested mode in hive. + */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + atomic_set(&hive->requested_nps_mode, mode); + amdgpu_put_xgmi_hive(hive); + } else { + adev->gmc.requested_nps_mode = mode; + } + + dev_info( + adev->dev, + "NPS mode change requested, please remove and reload the driver\n"); + + return count; +} + static ssize_t current_memory_partition_show( struct device *dev, struct device_attribute *addr, char *buf) { @@ -1138,53 +1199,65 @@ static ssize_t current_memory_partition_show( enum amdgpu_memory_partition mode; mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); - switch (mode) { - case AMDGPU_NPS1_PARTITION_MODE: - return sysfs_emit(buf, "NPS1\n"); - case AMDGPU_NPS2_PARTITION_MODE: - return sysfs_emit(buf, "NPS2\n"); - case AMDGPU_NPS3_PARTITION_MODE: - return sysfs_emit(buf, "NPS3\n"); - case AMDGPU_NPS4_PARTITION_MODE: - return sysfs_emit(buf, "NPS4\n"); - case AMDGPU_NPS6_PARTITION_MODE: - return sysfs_emit(buf, "NPS6\n"); - case AMDGPU_NPS8_PARTITION_MODE: - return sysfs_emit(buf, "NPS8\n"); - default: + if ((mode >= ARRAY_SIZE(nps_desc)) || + (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode)) return sysfs_emit(buf, "UNKNOWN\n"); - } + + return sysfs_emit(buf, "%s\n", nps_desc[mode]); } -static DEVICE_ATTR_RO(current_memory_partition); +static DEVICE_ATTR_RW(current_memory_partition); +static DEVICE_ATTR_RO(available_memory_partition); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) { + bool nps_switch_support; + int r = 0; + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) return 0; + nps_switch_support = (hweight32(adev->gmc.supported_nps_modes & + AMDGPU_ALL_NPS_MASK) > 1); + if (!nps_switch_support) + dev_attr_current_memory_partition.attr.mode &= + ~(S_IWUSR | S_IWGRP | S_IWOTH); + else + r = device_create_file(adev->dev, + &dev_attr_available_memory_partition); + + if (r) + return r; + return device_create_file(adev->dev, &dev_attr_current_memory_partition); } void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) { + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) + return; + device_remove_file(adev->dev, &dev_attr_current_memory_partition); + device_remove_file(adev->dev, &dev_attr_available_memory_partition); } int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges) + uint8_t *exp_ranges) { struct amdgpu_gmc_memrange *ranges; int range_cnt, ret, i, j; uint32_t nps_type; + bool refresh; - if (!mem_ranges) + if (!mem_ranges || !exp_ranges) return -EINVAL; + refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && + (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS); ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, - &range_cnt); + &range_cnt, refresh); if (ret) return ret; @@ -1192,16 +1265,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, /* 
TODO: For now, expect ranges and partition count to be the same. * Adjust if there are holes expected in any NPS domain. */ - if (range_cnt != exp_ranges) { + if (*exp_ranges && (range_cnt != *exp_ranges)) { dev_warn( adev->dev, "NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d", - exp_ranges, nps_type, range_cnt); + *exp_ranges, nps_type, range_cnt); ret = -EINVAL; goto err; } - for (i = 0; i < exp_ranges; ++i) { + for (i = 0; i < range_cnt; ++i) { if (ranges[i].base_address >= ranges[i].limit_address) { dev_warn( adev->dev, @@ -1242,8 +1315,81 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, ranges[i].limit_address - ranges[i].base_address + 1; } + if (!*exp_ranges) + *exp_ranges = range_cnt; err: kfree(ranges); return ret; } + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode) +{ + /* Not supported on VF devices and APUs */ + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return -EOPNOTSUPP; + + if (!adev->psp.funcs) { + dev_err(adev->dev, + "PSP interface not available for nps mode change request"); + return -EINVAL; + } + + return psp_memory_partition(&adev->psp, nps_mode); +} + +static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev, + int req_nps_mode, + int cur_nps_mode) +{ + return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) == + BIT(req_nps_mode)) && + req_nps_mode != cur_nps_mode); +} + +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev) +{ + int req_nps_mode, cur_nps_mode, r; + struct amdgpu_hive_info *hive; + + if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes || + !adev->gmc.gmc_funcs->request_mem_partition_mode) + return; + + cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + req_nps_mode = atomic_read(&hive->requested_nps_mode); + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, + cur_nps_mode)) { + amdgpu_put_xgmi_hive(hive); + return; + } + r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode); + amdgpu_put_xgmi_hive(hive); + goto out; + } + + req_nps_mode = adev->gmc.requested_nps_mode; + if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode)) + return; + + /* even if this fails, we should let driver unload w/o blocking */ + r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode); +out: + if (r) + dev_err(adev->dev, "NPS mode change request failed\n"); + else + dev_info( + adev->dev, + "NPS mode change request done, reload driver to complete the change\n"); +} + +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->gmc.gmc_funcs->need_reset_on_init) + return adev->gmc.gmc_funcs->need_reset_on_init(adev); + + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 4d951a1baefa..459a30fe239f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -73,6 +73,13 @@ enum amdgpu_memory_partition { AMDGPU_NPS8_PARTITION_MODE = 8, }; +#define AMDGPU_ALL_NPS_MASK \ + (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \ + BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \ + BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE)) + +#define AMDGPU_GMC_INIT_RESET_NPS BIT(0) + /* * GMC page fault information */ @@ -161,6 +168,10 @@ struct amdgpu_gmc_funcs { enum amdgpu_memory_partition (*query_mem_partition_mode)( struct amdgpu_device *adev); 
+ /* Request NPS mode */ + int (*request_mem_partition_mode)(struct amdgpu_device *adev, + int nps_mode); + bool (*need_reset_on_init)(struct amdgpu_device *adev); }; struct amdgpu_xgmi_ras { @@ -182,7 +193,6 @@ struct amdgpu_xgmi { bool supported; struct ras_common_if *ras_if; bool connected_to_cpu; - bool pending_reset; struct amdgpu_xgmi_ras *ras; }; @@ -305,6 +315,9 @@ struct amdgpu_gmc { struct amdgpu_mem_partition_info *mem_partitions; uint8_t num_mem_partitions; const struct amdgpu_gmc_funcs *gmc_funcs; + enum amdgpu_memory_partition requested_nps_mode; + uint32_t supported_nps_modes; + uint32_t reset_flags; struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; @@ -447,13 +460,17 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); -uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, struct amdgpu_mem_partition_info *mem_ranges, - int exp_ranges); + uint8_t *exp_ranges); + +int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, + int nps_mode); +void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); +bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 00d6211e0fbf..f0765ccde668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -225,15 +225,6 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) kfree(i2c); } -/* Add the default buses */ -void amdgpu_i2c_init(struct amdgpu_device *adev) -{ - if (amdgpu_hw_i2c) - DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); - - amdgpu_atombios_i2c_init(adev); -} - /* remove all the buses */ void amdgpu_i2c_fini(struct amdgpu_device *adev) { @@ -247,22 +238,6 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) } } -/* Add additional buses */ -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name) -{ - struct drm_device *dev = adev_to_drm(adev); - int i; - - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { - if (!adev->i2c_bus[i]) { - adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name); - return; - } - } -} - /* looks up bus based on id */ struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h index 63c2ff7499e1..21e3d1dad0a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h @@ -28,11 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, const struct amdgpu_i2c_bus_rec *rec, const char *name); void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c); -void amdgpu_i2c_init(struct amdgpu_device *adev); void amdgpu_i2c_fini(struct amdgpu_device *adev); -void amdgpu_i2c_add(struct amdgpu_device *adev, - const struct amdgpu_i2c_bus_rec *rec, - const char *name); struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, const struct amdgpu_i2c_bus_rec *i2c_bus); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 92d27d32de41..8e712a11aba5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -342,15 +342,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * @ring: ring we want to submit job to * @job: job who wants to use the VMID * @id: resulting VMID - * @fence: fence to wait for if no id could be grabbed * * Try to reuse a VMID for this submission. */ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_vmid **id, - struct dma_fence **fence) + struct amdgpu_vmid **id) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; @@ -429,7 +427,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !id) goto error; } else { - r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); + r = amdgpu_vmid_grab_used(vm, ring, job, &id); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 4766e99dd98f..263ce1811cc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,33 +33,17 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" -static int isp_sw_init(void *handle) -{ - return 0; -} - -static int isp_sw_fini(void *handle) -{ - return 0; -} - /** * isp_hw_init - start and test isp block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int isp_hw_init(void *handle) +static int isp_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; - const struct amdgpu_ip_block *ip_block = - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP); - - if (!ip_block) - return -EINVAL; - if (isp->funcs->hw_init != NULL) return isp->funcs->hw_init(isp); @@ -69,13 +53,12 @@ static int isp_hw_init(void *handle) /** * isp_hw_fini - stop the hardware block * - * @handle: handle for amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
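+ * Return: 0 on success, or -ENODEV when the ISP provides no hw_fini
+ * callback.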
* */ -static int isp_hw_fini(void *handle) +static int isp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_isp *isp = &adev->isp; + struct amdgpu_isp *isp = &ip_block->adev->isp; if (isp->funcs->hw_fini != NULL) return isp->funcs->hw_fini(isp); @@ -83,16 +66,6 @@ static int isp_hw_fini(void *handle) return -ENODEV; } -static int isp_suspend(void *handle) -{ - return 0; -} - -static int isp_resume(void *handle) -{ - return 0; -} - static int isp_load_fw_by_psp(struct amdgpu_device *adev) { const struct common_firmware_header *hdr; @@ -122,9 +95,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev) return r; } -static int isp_early_init(void *handle) +static int isp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_isp *isp = &adev->isp; switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { @@ -154,16 +128,6 @@ static bool isp_is_idle(void *handle) return true; } -static int isp_wait_for_idle(void *handle) -{ - return 0; -} - -static int isp_soft_reset(void *handle) -{ - return 0; -} - static int isp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -179,16 +143,9 @@ static int isp_set_powergating_state(void *handle, static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, - .late_init = NULL, - .sw_init = isp_sw_init, - .sw_fini = isp_sw_fini, .hw_init = isp_hw_init, .hw_fini = isp_hw_fini, - .suspend = isp_suspend, - .resume = isp_resume, .is_idle = isp_is_idle, - .wait_for_idle = isp_wait_for_idle, - .soft_reset = isp_soft_reset, .set_clockgating_state = isp_set_clockgating_state, .set_powergating_state = isp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 16f2605ac50b..b9d08bc96581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev, for (i = 0; i < adev->num_ip_blocks; i++) if (adev->ip_blocks[i].version->funcs->dump_ip_state) adev->ip_blocks[i].version->funcs - ->dump_ip_state((void *)adev); + ->dump_ip_state((void *)&adev->ip_blocks[i]); dev_info(adev->dev, "Dumping IP State Completed\n"); amdgpu_coredump(adev, true, false, job); @@ -137,6 +137,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) /* attempt a per ring reset */ if (amdgpu_gpu_recovery && ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); /* stop the scheduler, but don't mess with the * bad job yet because if ring reset fails * we'll fall back to full GPU reset. 
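The hunks above and below trace the escalation order on a job timeout: log the attempt, stop the scheduler, try the per-ring reset, and fall back to a full GPU recovery only when that fails. The stand-alone C sketch below restates that ordering; every type and helper in it is an invented stand-in for illustration, not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in ring type; the real driver uses struct amdgpu_ring. */
struct ring {
	const char *name;
	bool sched_ready;
};

/* Stand-in helpers; assume the per-ring reset reports success. */
static bool ring_reset(struct ring *r) { (void)r; return true; }
static void fence_force_completion(struct ring *r) { (void)r; }
static void sched_stop(struct ring *r) { r->sched_ready = false; }
static void sched_start(struct ring *r) { r->sched_ready = true; }
static void full_gpu_recovery(struct ring *r)
{
	fprintf(stderr, "%s: escalating to full GPU reset\n", r->name);
}

static void handle_timeout(struct ring *r, bool gpu_recovery_enabled)
{
	if (gpu_recovery_enabled) {
		fprintf(stderr, "Starting %s ring reset\n", r->name);
		/* Stop the scheduler first, but keep the bad job around:
		 * if the ring reset fails it is still needed below. */
		sched_stop(r);
		if (ring_reset(r)) {
			/* Per-ring reset worked: complete fences, resume. */
			fence_force_completion(r);
			sched_start(r);
			return;
		}
		fprintf(stderr, "Ring %s reset failure\n", r->name);
	}
	full_gpu_recovery(r);
}

int main(void)
{
	struct ring gfx = { .name = "gfx", .sched_ready = true };

	handle_timeout(&gfx, true);
	return 0;
}

Keeping the bad job alive until the cheap reset has succeeded is what lets the fallback path reuse it for the full recovery.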
@@ -149,9 +150,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_inc(&ring->adev->gpu_reset_counter); amdgpu_fence_driver_force_completion(ring); if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); goto exit; } + dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); } if (amdgpu_device_should_recover_gpu(ring->adev)) { @@ -356,10 +358,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, if (r) goto error; - if (!fence && job->gang_submit) + if (job->gang_submit) fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit); - while (!fence && job->vm && !job->vmid) { + if (!fence && job->vm && !job->vmid) { r = amdgpu_vmid_grab(job->vm, ring, job, &fence); if (r) { dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6df99cb00d9a..04eb51674596 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -47,7 +47,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) adev->jpeg.indirect_sram = true; for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; if (adev->jpeg.indirect_sram) { @@ -73,7 +73,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; amdgpu_bo_free_kernel( @@ -110,7 +110,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) unsigned int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) + if (adev->jpeg.harvest_config & (1U << i)) continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) @@ -342,3 +342,111 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_ip_fw_load(&adev->psp, &ucode); } + +/* + * debugfs for to enable/disable jpeg job submission to specific core. 
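The harvest_config hunks above switch `1 << i` to `1U << i`. With a signed literal, shifting a 1 into bit 31 overflows int, which is undefined behavior in C; the unsigned literal makes every bit of a 32-bit mask well defined. A tiny compilable demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t harvest_config = 0x80000001u;	/* bits 0 and 31 set */

	for (unsigned int i = 0; i < 32; i++) {
		/* 1U << 31 is well defined; 1 << 31 is signed overflow. */
		if (harvest_config & (1U << i))
			printf("instance %u harvested\n", i);
	}
	return 0;
}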
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j))) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + } + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i, j; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (ring->sched.ready) + mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j); + } + } + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops, + amdgpu_debugfs_jpeg_sched_mask_get, + amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->jpeg.num_jpeg_inst > 1) && !(adev->jpeg.num_jpeg_rings > 1)) + return; + sprintf(name, "amdgpu_jpeg_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_jpeg_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset); +} + +static DEVICE_ATTR(jpeg_reset_mask, 0444, + amdgpu_get_jpeg_reset_mask, NULL); + +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->jpeg.num_jpeg_inst) { + r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->jpeg.num_jpeg_inst) + device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index f9cdd873ac9b..3eb4a4653fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -128,6 +128,7 @@ struct amdgpu_jpeg { uint16_t inst_mask; uint8_t num_inst_per_aid; bool indirect_sram; + uint32_t supported_reset; }; int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); @@ -149,5 +150,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ 
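The debugfs handlers above pack ring (i, j) into bit i * num_jpeg_rings + j of a single u64, so one hex value can park or enable any decode ring; note the full-mask computation uses 1ULL so more than 32 ring bits stay well defined. A standalone sketch of the encode/decode, with the instance and ring counts assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define NUM_INST  2	/* assumed instance count */
#define NUM_RINGS 2	/* assumed rings per instance */

int main(void)
{
	uint64_t mask = 0;

	/* Encode: ring (i, j) occupies bit i * NUM_RINGS + j. */
	mask |= 1ULL << (0 * NUM_RINGS + 1);	/* inst 0, ring 1 */
	mask |= 1ULL << (1 * NUM_RINGS + 0);	/* inst 1, ring 0 */

	/* Decode, mirroring the debugfs "set" path above. */
	for (unsigned int i = 0; i < NUM_INST; i++)
		for (unsigned int j = 0; j < NUM_RINGS; j++)
			printf("inst %u ring %u -> %s\n", i, j,
			       (mask & (1ULL << (i * NUM_RINGS + j))) ?
			       "ready" : "parked");
	return 0;
}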
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 18ee60378727..3ca03b5e0f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -348,6 +348,24 @@ static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgp return ret; } +static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, + struct mca_bank_entry *entry) +{ + bool ret; + + switch (type) { + case AMDGPU_MCA_ERROR_TYPE_CE: + ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]); + break; + case AMDGPU_MCA_ERROR_TYPE_UE: + default: + ret = true; + break; + } + + return ret; +} + static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set, struct ras_query_context *qctx) { @@ -373,7 +391,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc amdgpu_mca_bank_set_add_entry(mca_set, &entry); - amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); + if (amdgpu_mca_bank_should_dump(adev, type, &entry)) + amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 7d4b540340e0..59ec20b07a6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -104,7 +104,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) return 0; r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_DOMAIN_VRAM, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); @@ -192,17 +192,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev) (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]]; } - r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs); - if (r) { - dev_err(adev->dev, - "(%d) read_val_offs alloc failed\n", r); - goto error; - } - adev->mes.read_val_gpu_addr = - adev->wb.gpu_addr + (adev->mes.read_val_offs * 4); - adev->mes.read_val_ptr = - (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs]; - r = amdgpu_mes_doorbell_init(adev); if (r) goto error; @@ -223,8 +212,6 @@ error: amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); @@ -249,8 +236,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } - if (adev->mes.read_val_ptr) - amdgpu_device_wb_free(adev, adev->mes.read_val_offs); amdgpu_mes_doorbell_free(adev); @@ -905,7 +890,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.me_id = ring->me; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; - queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0; queue_input.wptr_addr = ring->wptr_gpu_addr; queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; @@ -921,10 +906,19 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) { struct mes_misc_op_input op_input; int r, val = 0; + uint32_t addr_offset = 0; + uint64_t read_val_gpu_addr; + uint32_t *read_val_ptr; + if (amdgpu_device_wb_get(adev, &addr_offset)) { + DRM_ERROR("critical bug! 
too many mes readers\n"); + goto error; + } + read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4); + read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset]; op_input.op = MES_MISC_OP_READ_REG; op_input.read_reg.reg_offset = reg; - op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr; + op_input.read_reg.buffer_addr = read_val_gpu_addr; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes rreg is not supported!\n"); @@ -935,9 +929,11 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else - val = *(adev->mes.read_val_ptr); + val = *(read_val_ptr); error: + if (addr_offset) + amdgpu_device_wb_free(adev, addr_offset); return val; } @@ -1594,6 +1590,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) char ucode_prefix[30]; char fw_name[50]; bool need_retry = false; + u32 *ucode_ptr; int r; amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, @@ -1631,6 +1628,10 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) adev->mes.data_start_addr[pipe] = le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); + ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data + + sizeof(union amdgpu_firmware_header)); + adev->mes.fw_version[pipe] = + le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { int ucode, ucode_data; @@ -1677,6 +1678,29 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) return is_supported; } +/* Fix me -- node_id is used to identify the correct MES instances in the future */ +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable) +{ + struct mes_misc_op_input op_input = {0}; + int r; + + op_input.op = MES_MISC_OP_CHANGE_CONFIG; + op_input.change_config.option.limit_single_process = enable ? 
1 : 0; + + if (!adev->mes.funcs->misc_op) { + dev_err(adev->dev, "mes change config is not supported!\n"); + r = -EINVAL; + goto error; + } + + r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + if (r) + dev_err(adev->dev, "failed to change_config.\n"); + +error: + return r; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 96788c0f42f1..c6f93cbd6739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -40,6 +40,7 @@ #define AMDGPU_MES_VERSION_MASK 0x00000fff #define AMDGPU_MES_API_VERSION_MASK 0x00fff000 #define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000 +#define AMDGPU_MES_MSCRATCH_SIZE 0x8000 enum amdgpu_mes_priority_level { AMDGPU_MES_PRIORITY_LEVEL_LOW = 0, @@ -75,6 +76,7 @@ struct amdgpu_mes { uint32_t sched_version; uint32_t kiq_version; + uint32_t fw_version[AMDGPU_MAX_MES_PIPES]; bool enable_legacy_queue_map; uint32_t total_max_queue; @@ -119,9 +121,6 @@ struct amdgpu_mes { uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES]; uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES]; uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES]; - uint32_t read_val_offs; - uint64_t read_val_gpu_addr; - uint32_t *read_val_ptr; uint32_t saved_flags; @@ -310,6 +309,7 @@ enum mes_misc_opcode { MES_MISC_OP_WRM_REG_WAIT, MES_MISC_OP_WRM_REG_WR_WAIT, MES_MISC_OP_SET_SHADER_DEBUGGER, + MES_MISC_OP_CHANGE_CONFIG, }; struct mes_misc_op_input { @@ -348,6 +348,21 @@ struct mes_misc_op_input { uint32_t tcp_watch_cntl[4]; uint32_t trap_en; } set_shader_debugger; + + struct { + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + }; + uint32_t all; + } option; + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; + } change_config; }; }; @@ -518,4 +533,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes) } bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev); + +int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable); + #endif /* __AMDGPU_MES_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index f61d117b0caf..79c2f807b9fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs { int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, u32 *supp_modes); + bool (*is_nps_switch_requested)(struct amdgpu_device *adev); u64 (*get_pcie_replay_count)(struct amdgpu_device *adev); void (*set_reg_remap)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 971419e3a9bb..6852d50caa89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -40,6 +40,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_vram_mgr.h" +#include "amdgpu_vm.h" /** * DOC: amdgpu_object @@ -1171,54 +1172,71 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, } void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int sz) { + const unsigned int domain_to_pl[] = { + [ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM, + 
[ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT, + [ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM, + [ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS, + [ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS, + [ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA, + [ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL, + }; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; + struct drm_gem_object *obj = &bo->tbo.base; uint64_t size = amdgpu_bo_size(bo); - struct drm_gem_object *obj; - bool shared; + unsigned int type; + + if (!res) { + /* + * If no backing store use one of the preferred domain for basic + * stats. We take the MSB since that should give a reasonable + * view. + */ + BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT || + TTM_PL_VRAM < TTM_PL_SYSTEM); + type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK); + if (!type) + return; + type--; + if (drm_WARN_ON_ONCE(&adev->ddev, + type >= ARRAY_SIZE(domain_to_pl))) + return; + type = domain_to_pl[type]; + } else { + type = res->mem_type; + } - /* Abort if the BO doesn't currently have a backing store */ - if (!res) + if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz)) return; - obj = &bo->tbo.base; - shared = drm_gem_object_is_shared_for_memory_stats(obj); - - switch (res->mem_type) { - case TTM_PL_VRAM: - stats->vram += size; - if (amdgpu_res_cpu_visible(adev, res)) - stats->visible_vram += size; - if (shared) - stats->vram_shared += size; - break; - case TTM_PL_TT: - stats->gtt += size; - if (shared) - stats->gtt_shared += size; - break; - case TTM_PL_SYSTEM: - default: - stats->cpu += size; - if (shared) - stats->cpu_shared += size; - break; + /* DRM stats common fields: */ + + if (drm_gem_object_is_shared_for_memory_stats(obj)) + stats[type].drm.shared += size; + else + stats[type].drm.private += size; + + if (res) { + stats[type].drm.resident += size; + + if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP)) + stats[type].drm.active += size; + else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) + stats[type].drm.purgeable += size; } + /* amdgpu specific stats: */ + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) { - stats->requested_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->requested_visible_vram += size; - - if (res->mem_type != TTM_PL_VRAM) { - stats->evicted_vram += size; - if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - stats->evicted_visible_vram += size; - } + stats[TTM_PL_VRAM].requested += size; + if (type != TTM_PL_VRAM) + stats[TTM_PL_VRAM].evicted += size; } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) { - stats->requested_gtt += size; + stats[TTM_PL_TT].requested += size; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 717e47b46167..be6769852ece 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -139,33 +139,6 @@ struct amdgpu_bo_vm { struct amdgpu_vm_bo_base entries[]; }; -struct amdgpu_mem_stats { - /* current VRAM usage, includes visible VRAM */ - uint64_t vram; - /* current shared VRAM usage, includes visible VRAM */ - uint64_t vram_shared; - /* current visible VRAM usage */ - uint64_t visible_vram; - /* current GTT usage */ - uint64_t gtt; - /* current shared GTT usage */ - uint64_t gtt_shared; - /* current system memory usage */ - uint64_t cpu; - /* current shared system memory usage */ - uint64_t cpu_shared; - /* sum of evicted buffers, includes visible VRAM */ - uint64_t evicted_vram; - /* sum 
of evicted buffers due to CPU access */ - uint64_t evicted_visible_vram; - /* how much userspace asked for, includes vis.VRAM */ - uint64_t requested_vram; - /* how much userspace asked for */ - uint64_t requested_visible_vram; - /* how much userspace asked for */ - uint64_t requested_gtt; -}; - static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) { return container_of(tbo, struct amdgpu_bo, tbo); @@ -328,7 +301,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0b28b2cf1517..17cf10c0b72b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -159,9 +159,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp) return ret; } -static int psp_early_init(void *handle) +static int psp_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; psp->autoload_supported = true; @@ -421,9 +421,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, return ret; } -static int psp_sw_init(void *handle) +static int psp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; int ret; struct psp_runtime_boot_cfg_entry boot_cfg_entry; @@ -527,9 +527,9 @@ failed1: return ret; } -static int psp_sw_fini(void *handle) +static int psp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; struct psp_gfx_cmd_resp *cmd = psp->cmd; @@ -639,6 +639,8 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "AUTOLOAD_RLC"; case GFX_CMD_ID_BOOT_CFG: return "BOOT_CFG"; + case GFX_CMD_ID_CONFIG_SQ_PERFMON: + return "CONFIG_SQ_PERFMON"; default: return "UNKNOWN CMD"; } @@ -1043,6 +1045,31 @@ static int psp_rl_load(struct amdgpu_device *adev) return ret; } +int psp_memory_partition(struct psp_context *psp, int mode) +{ + struct psp_gfx_cmd_resp *cmd; + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; + cmd->cmd.cmd_memory_part.mode = mode; + + dev_info(psp->adev->dev, + "Requesting %d memory partition change through PSP", mode); + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_err(psp->adev->dev, + "PSP request failed to change to NPS%d mode\n", mode); + + release_psp_cmd_buf(psp); + + return ret; +} + int psp_spatial_partition(struct psp_context *psp, int mode) { struct psp_gfx_cmd_resp *cmd; @@ -1807,6 +1834,9 @@ int psp_ras_initialize(struct psp_context *psp) ras_cmd->ras_in_message.init_flags.xcc_mask = adev->gfx.xcc_mask; ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + ras_cmd->ras_in_message.init_flags.nps_mode = + 
adev->gmc.gmc_funcs->query_mem_partition_mode(adev); ret = psp_ta_load(psp, &psp->ras_context.context); @@ -2264,6 +2294,19 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp) } } +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return false; + + if (psp->funcs && psp->funcs->is_reload_needed) + return psp->funcs->is_reload_needed(psp); + + return false; +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -2958,10 +3001,10 @@ failed: return ret; } -static int psp_hw_init(void *handle) +static int psp_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; mutex_lock(&adev->firmware.mutex); /* @@ -2987,9 +3030,9 @@ failed: return -EINVAL; } -static int psp_hw_fini(void *handle) +static int psp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (psp->ta_fw) { @@ -3011,10 +3054,10 @@ static int psp_hw_fini(void *handle) return 0; } -static int psp_suspend(void *handle) +static int psp_suspend(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; if (adev->gmc.xgmi.num_physical_nodes > 1 && @@ -3074,10 +3117,10 @@ out: return ret; } -static int psp_resume(void *handle) +static int psp_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct psp_context *psp = &adev->psp; dev_info(adev->dev, "PSP is resuming...\n"); @@ -3523,6 +3566,36 @@ out: return err; } +static bool is_ta_fw_applicable(struct psp_context *psp, + const struct psp_fw_bin_desc *desc) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t fw_version; + + switch (desc->fw_type) { + case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: + /* for now, AUX TA only exists on 13.0.6 ta bin, + * from v20.00.0x.14 + */ + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == + IP_VERSION(13, 0, 6)) { + fw_version = le32_to_cpu(desc->fw_version); + + if (adev->flags & AMD_IS_APU && + (fw_version & 0xff) >= 0x14) + return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; + else + return desc->fw_type == TA_FW_TYPE_PSP_XGMI; + } + break; + default: + break; + } + + return true; +} + static int parse_ta_bin_descriptor(struct psp_context *psp, const struct psp_fw_bin_desc *desc, const struct ta_firmware_header_v2_0 *ta_hdr) @@ -3532,6 +3605,9 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, if (!psp || !desc || !ta_hdr) return -EINVAL; + if (!is_ta_fw_applicable(psp, desc)) + return 0; + ucode_start_addr = (uint8_t *)ta_hdr + le32_to_cpu(desc->offset_bytes) + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); @@ -3544,6 +3620,7 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, psp->asd_context.bin_desc.start_addr = ucode_start_addr; break; case TA_FW_TYPE_PSP_XGMI: + case TA_FW_TYPE_PSP_XGMI_AUX: psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; @@ -3736,8 +3813,44 @@ out: return err; 
} +int psp_config_sq_perfmon(struct psp_context *psp, + uint32_t xcp_id, bool core_override_enable, + bool reg_override_enable, bool perfmon_override_enable) +{ + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (xcp_id > MAX_XCP) { + dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); + return -EINVAL; + } + + if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { + dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", + amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); + return -EINVAL; + } + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; + cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); + cmd->cmd.config_sq_perfmon.core_override = core_override_enable; + cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; + cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", + xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); + + release_psp_cmd_buf(psp); + return ret; +} + static int psp_set_clockgating_state(void *handle, - enum amd_clockgating_state state) + enum amd_clockgating_state state) { return 0; } @@ -4019,17 +4132,12 @@ const struct attribute_group amdgpu_flash_attr_group = { const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, - .late_init = NULL, .sw_init = psp_sw_init, .sw_fini = psp_sw_fini, .hw_init = psp_hw_init, .hw_fini = psp_hw_fini, .suspend = psp_suspend, .resume = psp_resume, - .is_idle = NULL, - .check_soft_reset = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, .set_clockgating_state = psp_set_clockgating_state, .set_powergating_state = psp_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index e8abbbcb4326..567cb1f924ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -139,6 +139,7 @@ struct psp_funcs { int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); bool (*is_aux_sos_load_required)(struct psp_context *psp); + bool (*is_reload_needed)(struct psp_context *psp); }; struct ta_funcs { @@ -552,9 +553,15 @@ int psp_load_fw_list(struct psp_context *psp, void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); int psp_spatial_partition(struct psp_context *psp, int mode); +int psp_memory_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); bool amdgpu_psp_get_ras_capability(struct psp_context *psp); + +int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id, + bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); +bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a1395c5fff1..1bc95b0cdbb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1214,6 +1214,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } } +static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev, + struct ras_query_if *query_if, + struct ras_err_data 
*err_data, + struct ras_query_context *qctx) +{ + unsigned long new_ue, new_ce, new_de; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head); + const char *blk_name = get_ras_block_str(&query_if->head); + u64 event_id = qctx->evid.event_id; + + new_ce = err_data->ce_count - obj->err_data.ce_count; + new_ue = err_data->ue_count - obj->err_data.ue_count; + new_de = err_data->de_count - obj->err_data.de_count; + + if (new_ce) { + RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors " + "detected in %s block\n", + new_ce, + blk_name); + } + + if (new_ue) { + RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors " + "detected in %s block\n", + new_ue, + blk_name); + } + + if (new_de) { + RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors " + "detected in %s block\n", + new_de, + blk_name); + } +} + static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) { struct ras_err_node *err_node; @@ -1237,6 +1273,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s } } +static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj, + struct ras_err_data *err_data) +{ + /* Host reports absolute counts */ + obj->err_data.ue_count = err_data->ue_count; + obj->err_data.ce_count = err_data->ce_count; + obj->err_data.de_count = err_data->de_count; +} + static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk) { struct ras_common_if head; @@ -1323,7 +1368,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) return -EINVAL; - if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + return amdgpu_virt_req_ras_err_count(adev, blk, err_data); + } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { amdgpu_ras_get_ecc_info(adev, err_data); } else { @@ -1405,14 +1452,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev, if (ret) goto out_fini_err_data; - amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) { + amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); + amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); + } else { + /* Host provides absolute error counts. First generate the report + * using the previous VF internal count against new host count. + * Then Update VF internal count. 
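As the comment above spells out, the host reports absolute error totals under SR-IOV, so the VF derives per-query deltas against its cached copy before overwriting the cache with the new absolutes. A minimal standalone sketch of that bookkeeping (field names are illustrative; deferred errors follow the same pattern as CE/UE):

#include <stdio.h>

struct err_counts {
	unsigned long ce, ue;
};

/* Report errors new since the last query, then sync the cache to the
 * host's absolute totals. */
static void report_and_update(struct err_counts *cache,
			      const struct err_counts *host)
{
	unsigned long new_ce = host->ce - cache->ce;
	unsigned long new_ue = host->ue - cache->ue;

	if (new_ce)
		printf("%lu new correctable errors\n", new_ce);
	if (new_ue)
		printf("%lu new uncorrectable errors\n", new_ue);

	*cache = *host;		/* host counts are absolute */
}

int main(void)
{
	struct err_counts cache = { 0 };
	struct err_counts host = { .ce = 3, .ue = 1 };

	report_and_update(&cache, &host);	/* reports 3 CE, 1 UE */
	host.ce = 5;
	report_and_update(&cache, &host);	/* reports 2 CE */
	return 0;
}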
+ */ + amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx); + amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data); + } info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; info->de_count = obj->err_data.de_count; - amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); - out_fini_err_data: amdgpu_ras_error_data_fini(&err_data); @@ -2605,6 +2660,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; reset_context.src = AMDGPU_RESET_SRC_RAS; + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); /* Perform full reset in fatal error mode */ if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) @@ -3146,7 +3202,42 @@ static int amdgpu_ras_page_retirement_thread(void *param) return 0; } -int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + + if (!con || amdgpu_sriov_vf(adev)) + return 0; + + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + + if (ret) + return ret; + + /* HW not usable */ + if (amdgpu_ras_is_rma(adev)) + return -EHWPOISON; + + if (con->eeprom_control.ras_num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + return ret; + + amdgpu_dpm_send_hbm_bad_pages_num( + adev, con->eeprom_control.ras_num_recs); + + if (con->update_channel_flag == true) { + amdgpu_dpm_send_hbm_bad_channel_flag( + adev, con->eeprom_control.bad_channel_bitmap); + con->update_channel_flag = false; + } + } + + return ret; +} + +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data **data; @@ -3181,31 +3272,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); - /* Todo: During test the SMU might fail to read the eeprom through I2C - * when the GPU is pending on XGMI reset during probe time - * (Mostly after second bus reset), skip it now - */ - if (adev->gmc.xgmi.pending_reset) - return 0; - ret = amdgpu_ras_eeprom_init(&con->eeprom_control); - /* - * This calling fails when is_rma is true or - * ret != 0. 
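The refactor above splits bad-page bookkeeping out of amdgpu_ras_recovery_init() into amdgpu_ras_init_badpage_info(), gated by the new init_bp_info flag, presumably so callers such as the XGMI reset-on-init flow can defer EEPROM access until the reset settles; the helper also fails with -EHWPOISON once the part is RMA'd. A rough standalone sketch of the split, with illustrative names and the error paths simplified:

#include <stdio.h>

static int init_badpage_info(void)
{
	/* eeprom init + bad-page load would live here */
	printf("bad-page info initialized\n");
	return 0;
}

static int recovery_init(int init_bp_info)
{
	/* common recovery setup ... */
	if (init_bp_info)
		return init_badpage_info();
	return 0;	/* caller runs init_badpage_info() later */
}

int main(void)
{
	recovery_init(0);	/* deferred path, e.g. around a reset */
	init_badpage_info();	/* completed once it is safe to do so */
	recovery_init(1);	/* ordinary probe path */
	return 0;
}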
- */ - if (amdgpu_ras_is_rma(adev) || ret) - goto free; - - if (con->eeprom_control.ras_num_recs) { - ret = amdgpu_ras_load_bad_pages(adev); + if (init_bp_info) { + ret = amdgpu_ras_init_badpage_info(adev); if (ret) goto free; - - amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); - - if (con->update_channel_flag == true) { - amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); - con->update_channel_flag = false; - } } mutex_init(&con->page_rsv_lock); @@ -3438,6 +3508,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) if (!amdgpu_ras_asic_supported(adev)) return; + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_get_ras_capability(adev)) + goto init_ras_enabled_flag; + } + /* query ras capability from psp */ if (amdgpu_psp_get_ras_capability(&adev->psp)) goto init_ras_enabled_flag; @@ -3910,7 +3985,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) } /* Guest side doesn't need init ras feature */ - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev)) return 0; list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { @@ -4294,8 +4369,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; } - if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + int hive_ras_recovery = 0; + + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + /* In the case of multiple GPUs, after a GPU has started + * resetting all GPUs on hive, other GPUs do not need to + * trigger GPU reset again. + */ + if (!hive_ras_recovery) + amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + else + atomic_set(&ras->in_recovery, 0); + } else { + flush_work(&ras->recovery_work); amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); + } + return 0; } @@ -4358,11 +4452,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, return false; } - if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) + if (amdgpu_sriov_vf(adev)) { + *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY; + } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) { *error_query_mode = (con->is_aca_debug_mode) ? 
AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; - else + } else { *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; + } return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 669720a9c60a..6db772ecfee4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -365,6 +365,7 @@ enum amdgpu_ras_error_query_mode { AMDGPU_RAS_INVALID_ERROR_QUERY = 0, AMDGPU_RAS_DIRECT_ERROR_QUERY = 1, AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2, + AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3, }; /* ras error status reisger fields */ @@ -736,8 +737,8 @@ struct amdgpu_ras_block_hw_ops { * 8: feature disable */ - -int amdgpu_ras_recovery_init(struct amdgpu_device *adev); +int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev); +int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 66c1a868c0e1..24dae7cdbe95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -26,6 +26,156 @@ #include "sienna_cichlid.h" #include "smu_v13_0_10.h" +static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev) +{ + int i; + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].status.hw) + continue; + /* displays are handled in phase1 */ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) + continue; + + /* XXX handle errors */ + amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + adev->ip_blocks[i].status.hw = false; + } + + /* VCN FW shared region is in frambuffer, there are some flags + * initialized in that region during sw_init. Make sure the region is + * backed up. 
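The suspend helper above walks ip_blocks from the last index down, skipping blocks that are invalid, already powered off, or display (handled in phase 1), so hardware is torn down in the reverse of its init order. A compilable sketch of that traversal, with the block types reduced to an enum for illustration:

#include <stdbool.h>
#include <stdio.h>

enum blk_type { BLK_DCE, BLK_GFX, BLK_SDMA };

struct blk {
	enum blk_type type;
	bool valid, hw;
};

static void suspend_all(struct blk *b, int n)
{
	/* Reverse init order; displays are handled elsewhere. */
	for (int i = n - 1; i >= 0; i--) {
		if (!b[i].valid || !b[i].hw || b[i].type == BLK_DCE)
			continue;
		printf("suspend block %d\n", i);
		b[i].hw = false;
	}
}

int main(void)
{
	struct blk blocks[] = {
		{ BLK_GFX,  true, true },
		{ BLK_DCE,  true, true },
		{ BLK_SDMA, true, true },
	};

	suspend_all(blocks, 3);	/* suspends index 2, then 0 */
	return 0;
}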
+ */ + amdgpu_vcn_save_vcpu_bo(adev); + + return 0; +} + +static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev; + int r; + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + amdgpu_unregister_gpu_instance(tmp_adev); + r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: prepare for reset failed"); + return r; + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r) + return r; + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + if (!tmp_adev->kfd.init_complete) { + kgd2kfd_init_zone_device(tmp_adev); + amdgpu_amdkfd_device_init(tmp_adev); + amdgpu_amdkfd_drm_client_create(tmp_adev); + } + } + + return r; +} + +static int amdgpu_reset_xgmi_reset_on_init_perform_reset( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *tmp_adev = NULL; + int r; + + dev_dbg(adev->dev, "xgmi roi - hw reset\n"); + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_lock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = + amdgpu_asic_reset_method(adev); + } + r = 0; + /* Mode1 reset needs to be triggered on all devices together */ + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + /* For XGMI run all resets in parallel to speed up the process */ + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + if (r) { + dev_err(tmp_adev->dev, + "xgmi reset on init: reset failed with error, %d", + r); + break; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + + list_for_each_entry(tmp_adev, reset_device_list, reset_list) { + mutex_unlock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE; + } + + return r; +} + +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context) +{ + struct list_head *reset_device_list = reset_context->reset_device_list; + struct amdgpu_device *adev; + int r; + + if (!reset_device_list || list_empty(reset_device_list) || + list_is_singular(reset_device_list)) + return -EINVAL; + + adev = list_first_entry(reset_device_list, struct amdgpu_device, + reset_list); + r = amdgpu_reset_prepare_hwcontext(adev, reset_context); + if (r) + return r; + + r = amdgpu_reset_perform_reset(adev, reset_context); + + return r; +} + +struct amdgpu_reset_handler xgmi_reset_on_init_handler = { + .reset_method = AMD_RESET_METHOD_ON_INIT, + .prepare_env = NULL, + .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt, + .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset, + .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt, 
+ .restore_env = NULL, + .do_reset = NULL, +}; + int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 1cb920abc2fe..f8628bc898df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -153,4 +153,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf, for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ (handler = (*reset_ctl->reset_handlers)[i]); \ ++i) + +extern struct amdgpu_reset_handler xgmi_reset_on_init_handler; +int amdgpu_reset_do_xgmi_reset_on_init( + struct amdgpu_reset_context *reset_context); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 690976665cf6..a6e28fe3f8d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -108,10 +108,22 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) */ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; + uint32_t occupied, chunk1, chunk2; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + occupied = ring->wptr & ring->buf_mask; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count) ? count : chunk1; + chunk2 = count - chunk1; + + if (chunk1) + memset32(&ring->ring[occupied], ring->funcs->nop, chunk1); + + if (chunk2) + memset32(ring->ring, ring->funcs->nop, chunk2); + + ring->wptr += count; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count; } /** @@ -141,6 +153,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) { uint32_t count; + if (ring->count_dw < 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + /* We pad to match fetch size */ count = ring->funcs->align_mask + 1 - (ring->wptr & ring->funcs->align_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index f93f51002201..36fc9578c53c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -246,7 +246,7 @@ struct amdgpu_ring { struct drm_gpu_scheduler sched; struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; + uint32_t *ring; unsigned rptr_offs; u64 rptr_gpu_addr; volatile u32 *rptr_cpu_addr; @@ -288,7 +288,7 @@ struct amdgpu_ring { u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; unsigned int set_q_mode_offs; - volatile u32 *set_q_mode_ptr; + u32 *set_q_mode_ptr; u64 set_q_mode_token; unsigned vm_hub; unsigned vm_inv_eng; @@ -377,8 +377,6 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) { - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); ring->ring[ring->wptr++ & ring->buf_mask] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; @@ -388,13 +386,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) { unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; chunk1 = ring->buf_mask + 1 - occupied; chunk1 = (chunk1 >= count_dw) ? 
count_dw : chunk1; chunk2 = count_dw - chunk1; @@ -402,12 +395,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, chunk2 <<= 2; if (chunk1) - memcpy(dst, src, chunk1); + memcpy(&ring->ring[occupied], src, chunk1); if (chunk2) { src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); + memcpy(ring->ring, src, chunk2); } ring->wptr += count_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index b0a8abc7a8ec..341beec59537 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -35,21 +35,19 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, int32_t priority) { - struct fd f = fdget(fd); + CLASS(fd, f)(fd); struct amdgpu_fpriv *fpriv; struct amdgpu_ctx_mgr *mgr; struct amdgpu_ctx *ctx; uint32_t id; int r; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); - if (r) { - fdput(f); + if (r) return r; - } mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); @@ -57,7 +55,6 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, amdgpu_ctx_priority_override(ctx, priority); mutex_unlock(&mgr->lock); - fdput(f); return 0; } @@ -66,31 +63,25 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, unsigned ctx_id, int32_t priority) { - struct fd f = fdget(fd); + CLASS(fd, f)(fd); struct amdgpu_fpriv *fpriv; struct amdgpu_ctx *ctx; int r; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); - if (r) { - fdput(f); + if (r) return r; - } ctx = amdgpu_ctx_get(fpriv, ctx_id); - if (!ctx) { - fdput(f); + if (!ctx) return -EINVAL; - } amdgpu_ctx_priority_override(ctx, priority); amdgpu_ctx_put(ctx); - fdput(f); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 183a976ba29d..8c89b69edc20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -343,3 +343,114 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev) return 0; } + +/* + * debugfs for to enable/disable sdma job submission to specific core. 
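The amdgpu_ring_insert_nop() and amdgpu_ring_write_multiple() hunks above share one wrap-handling idea: instead of writing the ring a word at a time, split the transfer at the end of the power-of-two buffer into at most two bulk copies. A self-contained sketch of that split (buffer size and packet contents are made up for the demo; the kernel additionally masks wptr with a larger ptr_mask):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u			/* power of two */
#define BUF_MASK  (RING_SIZE - 1)

static uint32_t ring[RING_SIZE];
static uint32_t wptr;

/* Write count words, splitting at the wrap point into two memcpys. */
static void ring_write_multiple(const uint32_t *src, uint32_t count)
{
	uint32_t occupied = wptr & BUF_MASK;
	uint32_t chunk1 = RING_SIZE - occupied;

	chunk1 = chunk1 >= count ? count : chunk1;

	memcpy(&ring[occupied], src, chunk1 * sizeof(*src));
	if (count - chunk1)	/* remainder wraps to the start */
		memcpy(ring, src + chunk1,
		       (count - chunk1) * sizeof(*src));

	wptr = (wptr + count) & BUF_MASK;
}

int main(void)
{
	const uint32_t pkt[6] = { 1, 2, 3, 4, 5, 6 };

	wptr = 5;			/* force a wrap: 3 + 3 words */
	ring_write_multiple(pkt, 6);
	for (unsigned int i = 0; i < RING_SIZE; i++)
		printf("%u ", ring[i]);	/* prints: 4 5 6 0 0 1 2 3 */
	putchar('\n');
	return 0;
}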
+ */ +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + + mask = (1 << adev->sdma.num_instances) - 1; + if ((val & mask) == 0) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (val & (1 << i)) + ring->sched.ready = true; + else + ring->sched.ready = false; + } + /* publish sched.ready flag update effective immediately across smp */ + smp_rmb(); + return 0; +} + +static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)data; + u32 i; + u64 mask = 0; + struct amdgpu_ring *ring; + + if (!adev) + return -ENODEV; + for (i = 0; i < adev->sdma.num_instances; ++i) { + ring = &adev->sdma.instance[i].ring; + if (ring->sched.ready) + mask |= 1 << i; + } + + *val = mask; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops, + amdgpu_debugfs_sdma_sched_mask_get, + amdgpu_debugfs_sdma_sched_mask_set, "%llx\n"); + +#endif + +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + char name[32]; + + if (!(adev->sdma.num_instances > 1)) + return; + sprintf(name, "amdgpu_sdma_sched_mask"); + debugfs_create_file(name, 0600, root, adev, + &amdgpu_debugfs_sdma_sched_mask_fops); +#endif +} + +static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset); +} + +static DEVICE_ATTR(sdma_reset_mask, 0444, + amdgpu_get_sdma_reset_mask, NULL); + +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (!amdgpu_gpu_recovery) + return r; + + if (adev->sdma.num_instances) { + r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_gpu_recovery) + return; + + if (adev->sdma.num_instances) + device_remove_file(adev->dev, &dev_attr_sdma_reset_mask); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 087ce0f6fa07..2db58b5812a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -116,6 +116,7 @@ struct amdgpu_sdma { struct ras_common_if *ras_if; struct amdgpu_sdma_ras *ras; uint32_t *ip_dump; + uint32_t supported_reset; }; /* @@ -175,5 +176,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance, void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate); int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev); - +void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev); +int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev); +void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 74adb983ab03..9f922ec50ea2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -812,7 +812,7 @@ static 
int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, /* Map SG to device */ r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); if (r) - goto release_sg; + goto release_sg_table; /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, @@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, return 0; +release_sg_table: + sg_free_table(ttm->sg); release_sg: kfree(ttm->sg); ttm->sg = NULL; @@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); + dma_set_max_seg_size(adev->dev, UINT_MAX); /* No others user of address space so set it to 0 */ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 138d80017f35..2852a6064c9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,7 @@ #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) +#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4e23419b92d4..4150ec0aa10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -163,6 +163,7 @@ enum ta_fw_type { TA_FW_TYPE_PSP_DTM, TA_FW_TYPE_PSP_RAP, TA_FW_TYPE_PSP_SECUREDISPLAY, + TA_FW_TYPE_PSP_XGMI_AUX, TA_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index bb7b9b2eaac1..896f3609b0ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -318,6 +318,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r if (r) return r; + if (amdgpu_sriov_vf(adev)) + return r; + if (amdgpu_ras_is_supported(adev, ras_block->block)) { r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index 6162582d0aa2..bd2d3863c3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -765,9 +765,9 @@ static int umsch_mm_init(struct amdgpu_device *adev) } -static int umsch_mm_early_init(void *handle) +static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -784,9 +784,9 @@ static int umsch_mm_early_init(void *handle) return 0; } -static int umsch_mm_late_init(void *handle) +static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) return 0; @@ -794,9 +794,9 @@ static int umsch_mm_late_init(void *handle) return umsch_mm_test(adev); } -static int umsch_mm_sw_init(void *handle) +static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_init(adev); @@ -815,9 +815,9 
@@ static int umsch_mm_sw_init(void *handle) return 0; } -static int umsch_mm_sw_fini(void *handle) +static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; release_firmware(adev->umsch_mm.fw); adev->umsch_mm.fw = NULL; @@ -839,9 +839,9 @@ static int umsch_mm_sw_fini(void *handle) return 0; } -static int umsch_mm_hw_init(void *handle) +static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = umsch_mm_load_microcode(&adev->umsch_mm); @@ -857,9 +857,9 @@ static int umsch_mm_hw_init(void *handle) return 0; } -static int umsch_mm_hw_fini(void *handle) +static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; umsch_mm_ring_stop(&adev->umsch_mm); @@ -873,18 +873,14 @@ static int umsch_mm_hw_fini(void *handle) return 0; } -static int umsch_mm_suspend(void *handle) +static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_fini(adev); + return umsch_mm_hw_fini(ip_block); } -static int umsch_mm_resume(void *handle) +static int umsch_mm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return umsch_mm_hw_init(adev); + return umsch_mm_hw_init(ip_block); } void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm) @@ -997,8 +993,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = { .hw_fini = umsch_mm_hw_fini, .suspend = umsch_mm_suspend, .resume = umsch_mm_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 43f44cc201cb..aecb78e0519f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -294,21 +294,12 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t return ret; } -int amdgpu_vcn_suspend(struct amdgpu_device *adev) +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev) { unsigned int size; void *ptr; int i, idx; - bool in_ras_intr = amdgpu_ras_intr_triggered(); - - cancel_delayed_work_sync(&adev->vcn.idle_work); - - /* err_event_athub will corrupt VCPU buffer, so we need to - * restore fw data and clear buffer in amdgpu_vcn_resume() */ - if (in_ras_intr) - return 0; - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; @@ -327,9 +318,24 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) drm_dev_exit(idx); } } + return 0; } +int amdgpu_vcn_suspend(struct amdgpu_device *adev) +{ + bool in_ras_intr = amdgpu_ras_intr_triggered(); + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + /* err_event_athub will corrupt VCPU buffer, so we need to + * restore fw data and clear buffer in amdgpu_vcn_resume() */ + if (in_ras_intr) + return 0; + + return amdgpu_vcn_save_vcpu_bo(adev); +} + int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned int size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 2a1f3dbb14d3..765b809d48a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ 
-518,5 +518,6 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, enum AMDGPU_UCODE_ID ucode_id); +int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b6397d3229e1..c704e9803e11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->unique_id = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; + adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all; + adev->virt.ras_telemetry_en_caps.all = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all; break; default: dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); @@ -703,6 +706,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } else if (adev->mman.drv_vram_usage_va) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -710,6 +715,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -1144,3 +1151,185 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) return xnack_mode; } + +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_sriov_ras_caps_en(adev)) + return false; + + if (adev->virt.ras_en_caps.bits.block_umc) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC); + if (adev->virt.ras_en_caps.bits.block_sdma) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA); + if (adev->virt.ras_en_caps.bits.block_gfx) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX); + if (adev->virt.ras_en_caps.bits.block_mmhub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB); + if (adev->virt.ras_en_caps.bits.block_athub) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB); + if (adev->virt.ras_en_caps.bits.block_pcie_bif) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF); + if (adev->virt.ras_en_caps.bits.block_hdp) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP); + if (adev->virt.ras_en_caps.bits.block_xgmi_wafl) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL); + if (adev->virt.ras_en_caps.bits.block_df) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF); + if (adev->virt.ras_en_caps.bits.block_smn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN); + if (adev->virt.ras_en_caps.bits.block_sem) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM); + if (adev->virt.ras_en_caps.bits.block_mp0) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0); + if (adev->virt.ras_en_caps.bits.block_mp1) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1); + if (adev->virt.ras_en_caps.bits.block_fuse) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE); + if (adev->virt.ras_en_caps.bits.block_mca) + 
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA); + if (adev->virt.ras_en_caps.bits.block_vcn) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN); + if (adev->virt.ras_en_caps.bits.block_jpeg) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG); + if (adev->virt.ras_en_caps.bits.block_ih) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH); + if (adev->virt.ras_en_caps.bits.block_mpio) + adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO); + + if (adev->virt.ras_en_caps.bits.poison_propogation_mode) + con->poison_supported = true; /* Poison is handled by host */ + + return true; +} + +static inline enum amd_sriov_ras_telemetry_gpu_block +amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) { + switch (block) { + case AMDGPU_RAS_BLOCK__UMC: + return RAS_TELEMETRY_GPU_BLOCK_UMC; + case AMDGPU_RAS_BLOCK__SDMA: + return RAS_TELEMETRY_GPU_BLOCK_SDMA; + case AMDGPU_RAS_BLOCK__GFX: + return RAS_TELEMETRY_GPU_BLOCK_GFX; + case AMDGPU_RAS_BLOCK__MMHUB: + return RAS_TELEMETRY_GPU_BLOCK_MMHUB; + case AMDGPU_RAS_BLOCK__ATHUB: + return RAS_TELEMETRY_GPU_BLOCK_ATHUB; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF; + case AMDGPU_RAS_BLOCK__HDP: + return RAS_TELEMETRY_GPU_BLOCK_HDP; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL; + case AMDGPU_RAS_BLOCK__DF: + return RAS_TELEMETRY_GPU_BLOCK_DF; + case AMDGPU_RAS_BLOCK__SMN: + return RAS_TELEMETRY_GPU_BLOCK_SMN; + case AMDGPU_RAS_BLOCK__SEM: + return RAS_TELEMETRY_GPU_BLOCK_SEM; + case AMDGPU_RAS_BLOCK__MP0: + return RAS_TELEMETRY_GPU_BLOCK_MP0; + case AMDGPU_RAS_BLOCK__MP1: + return RAS_TELEMETRY_GPU_BLOCK_MP1; + case AMDGPU_RAS_BLOCK__FUSE: + return RAS_TELEMETRY_GPU_BLOCK_FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return RAS_TELEMETRY_GPU_BLOCK_MCA; + case AMDGPU_RAS_BLOCK__VCN: + return RAS_TELEMETRY_GPU_BLOCK_VCN; + case AMDGPU_RAS_BLOCK__JPEG: + return RAS_TELEMETRY_GPU_BLOCK_JPEG; + case AMDGPU_RAS_BLOCK__IH: + return RAS_TELEMETRY_GPU_BLOCK_IH; + case AMDGPU_RAS_BLOCK__MPIO: + return RAS_TELEMETRY_GPU_BLOCK_MPIO; + default: + dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); + return RAS_TELEMETRY_GPU_BLOCK_COUNT; + } +} + +static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry) +{ + struct amd_sriov_ras_telemetry_error_count *tmp = NULL; + uint32_t checksum, used_size; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + tmp = kmalloc(used_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + memcpy(tmp, &host_telemetry->body.error_count, used_size); + + if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0)) + goto out; + + memcpy(&adev->virt.count_cache, tmp, + min(used_size, sizeof(adev->virt.count_cache))); +out: + kfree(tmp); + + return 0; +} + +static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update) +{ + struct amdgpu_virt *virt = &adev->virt; + + /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host + * will ignore incoming guest messages. Ratelimit the guest messages to + * prevent guest self-DoS.
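
(Aside on the two helpers above: the guest is only granted a fixed budget of telemetry requests, 15 per 60 seconds per the comment, so refreshes are gated with __ratelimit(), and the host-written reply is bounce-buffered and checksum-verified before at most sizeof(count_cache) bytes are cached. A minimal sketch of the same gate/validate/cache flow, with illustrative names and a simplified header rather than the driver's actual layout:)

#include <linux/minmax.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct blob_header {			/* simplified stand-in for the telemetry header */
	u32 checksum;
	u32 used_size;
};

/* Refresh @cache from a producer-writable @region of @region_size bytes,
 * but only when the rate limiter grants a slot (or @force is set). */
static int refresh_cache(struct ratelimit_state *rs, bool force,
			 const struct blob_header *region, size_t region_size,
			 void *cache, size_t cache_size,
			 u32 (*csum)(const void *buf, u32 len))
{
	const void *payload = region + 1;	/* payload follows the header */
	void *tmp;

	if (!__ratelimit(rs) && !force)		/* stay under the producer's request budget */
		return 0;

	if (region_size < sizeof(*region) ||
	    region->used_size > region_size - sizeof(*region))
		return 0;			/* implausible size: ignore the region */

	/* Snapshot first: the producer may rewrite the shared region at any
	 * time, so the checksum must be computed over a stable local copy. */
	tmp = kmemdup(payload, region->used_size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (region->checksum == csum(tmp, region->used_size))
		memcpy(cache, tmp, min_t(size_t, region->used_size, cache_size));

	kfree(tmp);
	return 0;
}

(The ratelimit_state would be armed once at init, e.g. ratelimit_state_init(&rs, 60 * HZ, 15), to mirror the host's budget.)
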
+ */ + if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) { + if (!virt->ops->req_ras_err_count(adev)) + amdgpu_virt_cache_host_error_counts(adev, + adev->virt.fw_reserve.ras_telemetry); + } + + return 0; +} + +/* Bypass ACA interface and query ECC counts directly from host */ +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data) +{ + enum amd_sriov_ras_telemetry_gpu_block sriov_block; + + sriov_block = amdgpu_ras_block_to_sriov(adev, block); + + if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT || + !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block)) + return -EOPNOTSUPP; + + /* Host Access may be lost during reset, just return last cached data. */ + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_req_ras_err_count_internal(adev, false); + up_read(&adev->reset_domain->sem); + } + + err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count; + err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count; + err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count; + + return 0; +} + +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev) +{ + unsigned long ue_count, ce_count; + + if (amdgpu_sriov_ras_telemetry_en(adev)) { + amdgpu_virt_req_ras_err_count_internal(adev, true); + amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b650a2032c42..5381b8d596e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -95,6 +95,7 @@ struct amdgpu_virt_ops { void (*ras_poison_handler)(struct amdgpu_device *adev, enum amdgpu_ras_block block); bool (*rcvd_ras_intr)(struct amdgpu_device *adev); + int (*req_ras_err_count)(struct amdgpu_device *adev); }; /* @@ -103,6 +104,7 @@ struct amdgpu_virt_ops { struct amdgpu_virt_fw_reserve { struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; + void *ras_telemetry; unsigned int checksum_key; }; @@ -136,6 +138,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), /* MES info */ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), + AMDGIM_FEATURE_RAS_CAPS = (1 << 9), + AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -276,6 +280,12 @@ struct amdgpu_virt { uint32_t autoload_ucode_id; struct mutex rlcg_reg_lock; + + union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + + struct ratelimit_state ras_telemetry_rs; + struct amd_sriov_ras_telemetry_error_count count_cache; }; struct amdgpu_video_codec_info; @@ -320,6 +330,15 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_vf_mmio_access_protection(adev) \ ((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) +#define amdgpu_sriov_ras_caps_en(adev) \ +((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS) + +#define amdgpu_sriov_ras_telemetry_en(adev) \ +(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry) + +#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \ +(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk)) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -383,4 +402,8 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag); u32 amdgpu_virt_rlcg_reg_rw(struct 
amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id); +bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev); +int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block, + struct ras_err_data *err_data); +int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d4c2afafbb73..8bf28d336807 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -493,10 +493,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int amdgpu_vkms_sw_init(void *handle) +static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); @@ -536,9 +536,9 @@ static int amdgpu_vkms_sw_init(void *handle) return 0; } -static int amdgpu_vkms_sw_fini(void *handle) +static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) @@ -555,9 +555,9 @@ static int amdgpu_vkms_sw_fini(void *handle) return 0; } -static int amdgpu_vkms_hw_init(void *handle) +static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI @@ -600,31 +600,31 @@ static int amdgpu_vkms_hw_init(void *handle) return 0; } -static int amdgpu_vkms_hw_fini(void *handle) +static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int amdgpu_vkms_suspend(void *handle) +static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = drm_mode_config_helper_suspend(adev_to_drm(adev)); if (r) return r; - return amdgpu_vkms_hw_fini(handle); + + return 0; } -static int amdgpu_vkms_resume(void *handle) +static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vkms_hw_init(handle); + r = amdgpu_vkms_hw_init(ip_block); if (r) return r; - return drm_mode_config_helper_resume(adev_to_drm(adev)); + return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev)); } static bool amdgpu_vkms_is_idle(void *handle) @@ -632,16 +632,6 @@ static bool amdgpu_vkms_is_idle(void *handle) return true; } -static int amdgpu_vkms_wait_for_idle(void *handle) -{ - return 0; -} - -static int amdgpu_vkms_soft_reset(void *handle) -{ - return 0; -} - static int amdgpu_vkms_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -656,8 +646,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle, static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .name = "amdgpu_vkms", - .early_init = NULL, - .late_init = NULL, .sw_init = amdgpu_vkms_sw_init, .sw_fini = amdgpu_vkms_sw_fini, .hw_init = amdgpu_vkms_hw_init, @@ -665,12 +653,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .suspend = amdgpu_vkms_suspend, .resume = amdgpu_vkms_resume, .is_idle = amdgpu_vkms_is_idle, - 
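
(The vkms hunk above, like several others in this series, simply deletes hooks that were NULL or do-nothing stubs returning 0. That appears safe because the amd_ip_funcs hooks are treated as optional by their callers and tested before use; a sketch of that dispatch convention, with hypothetical names rather than the driver's exact call sites:)

/* Sketch: optional-hook dispatch, so an IP block may leave hooks unset. */
struct ip_block;

struct ip_funcs {
	const char *name;
	int (*hw_init)(struct ip_block *ipb);		/* mandatory in this sketch */
	int (*wait_for_idle)(struct ip_block *ipb);	/* optional */
	int (*soft_reset)(struct ip_block *ipb);	/* optional */
};

struct ip_block {
	const struct ip_funcs *funcs;
};

static int ip_block_recover(struct ip_block *ipb)
{
	int r = 0;

	/* An unset hook means "nothing to do", not an error. */
	if (ipb->funcs->wait_for_idle)
		r = ipb->funcs->wait_for_idle(ipb);
	if (!r && ipb->funcs->soft_reset)
		r = ipb->funcs->soft_reset(ipb);
	return r;
}
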
.wait_for_idle = amdgpu_vkms_wait_for_idle, - .soft_reset = amdgpu_vkms_soft_reset, .set_clockgating_state = amdgpu_vkms_set_clockgating_state, .set_powergating_state = amdgpu_vkms_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6005280f5f38..8d9bf7a0857f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1083,7 +1083,8 @@ error_free: } static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; @@ -1099,34 +1100,35 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va, !dma_resv_trylock(bo->tbo.base.resv)) return; - amdgpu_bo_get_memory(bo, stats); + amdgpu_bo_get_memory(bo, stats, size); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) dma_resv_unlock(bo->tbo.base.resv); } void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats) + struct amdgpu_mem_stats *stats, + unsigned int size) { struct amdgpu_bo_va *bo_va, *tmp; spin_lock(&vm->status_lock); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) - amdgpu_vm_bo_get_memory(bo_va, stats); + amdgpu_vm_bo_get_memory(bo_va, stats, size); spin_unlock(&vm->status_lock); } @@ -1159,7 +1161,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, int r; amdgpu_sync_create(&sync); - if (clear || !bo) { + if (clear) { mem = NULL; /* Implicitly sync to command submissions in the same VM before @@ -1174,6 +1176,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (r) goto error_free; } + } else if (!bo) { + mem = NULL; + + /* PRT map operations don't need to sync to anything. 
*/ } else { struct drm_gem_object *obj = &bo->tbo.base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 52dd7cdfdc81..5d119ac26c4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -42,7 +42,6 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; struct amdgpu_bo_vm; -struct amdgpu_mem_stats; /* * GPUVM handling @@ -322,6 +321,16 @@ struct amdgpu_vm_fault_info { unsigned int vmhub; }; +struct amdgpu_mem_stats { + struct drm_memory_stats drm; + + /* buffers that requested this placement */ + uint64_t requested; + /* buffers that requested this placement + * but are currently evicted */ + uint64_t evicted; +}; + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; @@ -567,7 +576,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_memory(struct amdgpu_vm *vm, - struct amdgpu_mem_stats *stats); + struct amdgpu_mem_stats *stats, + unsigned int size); int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 5acd20ff5979..3e6f9dfb61bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -295,9 +295,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe) return 0; } -static int vpe_early_init(void *handle) +static int vpe_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { @@ -356,9 +356,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe) return 0; } -static int vpe_sw_init(void *handle) +static int vpe_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -377,18 +377,26 @@ static int vpe_sw_init(void *handle) ret = vpe_init_microcode(vpe); if (ret) goto out; + + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vpe.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vpe.ring); + ret = amdgpu_vpe_sysfs_reset_mask_init(adev); + if (ret) + goto out; out: return ret; } -static int vpe_sw_fini(void *handle) +static int vpe_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; release_firmware(vpe->fw); vpe->fw = NULL; + amdgpu_vpe_sysfs_reset_mask_fini(adev); vpe_ring_fini(vpe); amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, @@ -398,9 +406,9 @@ static int vpe_sw_fini(void *handle) return 0; } -static int vpe_hw_init(void *handle) +static int vpe_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; int ret; @@ -421,9 +429,9 @@ static int vpe_hw_init(void *handle) return 0; } -static int vpe_hw_fini(void *handle) +static int vpe_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_vpe *vpe = &adev->vpe; 
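
(The vpe hunks just below wire a read-only vpe_reset_mask file into sysfs through the stock DEVICE_ATTR machinery: one show() callback, device_create_file() during init, device_remove_file() during teardown. A self-contained sketch of that pattern, with hypothetical demo_* names:)

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_reset_mask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the write to a single PAGE_SIZE buffer. */
	return sysfs_emit(buf, "soft queue\n");
}

/* 0444: world-readable, no store() hook. */
static DEVICE_ATTR(demo_reset_mask, 0444, demo_reset_mask_show, NULL);

static int demo_sysfs_init(struct device *dev)
{
	return device_create_file(dev, &dev_attr_demo_reset_mask);
}

static void demo_sysfs_fini(struct device *dev)
{
	device_remove_file(dev, &dev_attr_demo_reset_mask);
}

(For more than a file or two, an attribute_group registered with the device is the usual alternative, since it avoids the window where the device exists but late device_create_file() calls have not run yet.)
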
vpe_ring_stop(vpe); @@ -434,20 +442,18 @@ static int vpe_hw_fini(void *handle) return 0; } -static int vpe_suspend(void *handle) +static int vpe_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vpe.idle_work); - return vpe_hw_fini(adev); + return vpe_hw_fini(ip_block); } -static int vpe_resume(void *handle) +static int vpe_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vpe_hw_init(adev); + return vpe_hw_init(ip_block); } static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) @@ -867,6 +873,41 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring) schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } +static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + if (!adev) + return -ENODEV; + + return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset); +} + +static DEVICE_ATTR(vpe_reset_mask, 0444, + amdgpu_get_vpe_reset_mask, NULL); + +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->vpe.num_instances) { + r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask); + if (r) + return r; + } + + return r; +} + +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev) +{ + if (adev->vpe.num_instances) + device_remove_file(adev->dev, &dev_attr_vpe_reset_mask); +} + static const struct amdgpu_ring_funcs vpe_ring_funcs = { .type = AMDGPU_RING_TYPE_VPE, .align_mask = 0xf, @@ -908,14 +949,12 @@ static void vpe_set_ring_funcs(struct amdgpu_device *adev) const struct amd_ip_funcs vpe_ip_funcs = { .name = "vpe_v6_1", .early_init = vpe_early_init, - .late_init = NULL, .sw_init = vpe_sw_init, .sw_fini = vpe_sw_fini, .hw_init = vpe_hw_init, .hw_fini = vpe_hw_fini, .suspend = vpe_suspend, .resume = vpe_resume, - .soft_reset = NULL, .set_clockgating_state = vpe_set_clockgating_state, .set_powergating_state = vpe_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h index 231d86d0953e..695da740a97e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -79,6 +79,7 @@ struct amdgpu_vpe { uint32_t num_instances; bool collaborate_mode; + uint32_t supported_reset; }; int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); @@ -86,6 +87,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe); +void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev); +int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev); #define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) #define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? 
(vpe)->funcs->ring_start((vpe)) : 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index a6d456ec6aeb..e209b5e101df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -433,3 +433,292 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ + static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ + struct amdgpu_xcp_res_details *xcp_res, char *buf) \ + { \ + return sysfs_emit(buf, "%d\n", xcp_res->_name); \ + } + +struct amdgpu_xcp_res_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf); +}; + +#define XCP_CFG_SYSFS_RES_ATTR(_name) \ + struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \ + .attr = { .name = __stringify(_name), .mode = 0400 }, \ + .show = amdgpu_xcp_res_sysfs_##_name##_show, \ + } + +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) +XCP_CFG_SYSFS_RES_ATTR(num_inst); +XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared) +XCP_CFG_SYSFS_RES_ATTR(num_shared); + +#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr + +static struct attribute *xcp_cfg_res_sysfs_attrs[] = { + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst), + &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL +}; + +static const char *xcp_desc[] = { + [AMDGPU_SPX_PARTITION_MODE] = "SPX", + [AMDGPU_DPX_PARTITION_MODE] = "DPX", + [AMDGPU_TPX_PARTITION_MODE] = "TPX", + [AMDGPU_QPX_PARTITION_MODE] = "QPX", + [AMDGPU_CPX_PARTITION_MODE] = "CPX", +}; + +static const char *nps_desc[] = { + [UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN", + [AMDGPU_NPS1_PARTITION_MODE] = "NPS1", + [AMDGPU_NPS2_PARTITION_MODE] = "NPS2", + [AMDGPU_NPS3_PARTITION_MODE] = "NPS3", + [AMDGPU_NPS4_PARTITION_MODE] = "NPS4", + [AMDGPU_NPS6_PARTITION_MODE] = "NPS6", + [AMDGPU_NPS8_PARTITION_MODE] = "NPS8", +}; + +ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs); + +#define to_xcp_attr(x) \ + container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr) +#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj) + +static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct amdgpu_xcp_res_sysfs_attribute *attribute; + struct amdgpu_xcp_res_details *xcp_res; + + attribute = to_xcp_attr(attr); + xcp_res = to_xcp_res(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(xcp_res, buf); +} + +static const struct sysfs_ops xcp_cfg_res_sysfs_ops = { + .show = xcp_cfg_res_sysfs_attr_show, +}; + +static const struct kobj_type xcp_cfg_res_sysfs_ktype = { + .sysfs_ops = &xcp_cfg_res_sysfs_ops, + .default_groups = xcp_cfg_res_sysfs_groups, +}; + +const char *xcp_res_names[] = { + [AMDGPU_XCP_RES_XCC] = "xcc", + [AMDGPU_XCP_RES_DMA] = "dma", + [AMDGPU_XCP_RES_DEC] = "dec", + [AMDGPU_XCP_RES_JPEG] = "jpeg", +}; + +static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info) + return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg); + + return -EOPNOTSUPP; +} + +#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj) +static ssize_t supported_xcp_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr; + int size = 0, mode; + char *sep = ""; + + if (!xcp_mgr || !xcp_mgr->supp_xcp_modes) + return sysfs_emit(buf, "Not 
supported\n"); + + for_each_inst(mode, xcp_mgr->supp_xcp_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t supported_nps_configs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int size = 0, mode; + char *sep = ""; + + if (!xcp_cfg || !xcp_cfg->compatible_nps_modes) + return sysfs_emit(buf, "Not supported\n"); + + for_each_inst(mode, xcp_cfg->compatible_nps_modes) { + size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]); + sep = ", "; + } + + size += sysfs_emit_at(buf, size, "\n"); + + return size; +} + +static ssize_t xcp_config_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + return sysfs_emit(buf, "%s\n", + amdgpu_gfx_compute_mode_desc(xcp_cfg->mode)); +} + +static ssize_t xcp_config_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t size) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + int mode, r; + + if (!strncasecmp("SPX", buf, strlen("SPX"))) + mode = AMDGPU_SPX_PARTITION_MODE; + else if (!strncasecmp("DPX", buf, strlen("DPX"))) + mode = AMDGPU_DPX_PARTITION_MODE; + else if (!strncasecmp("TPX", buf, strlen("TPX"))) + mode = AMDGPU_TPX_PARTITION_MODE; + else if (!strncasecmp("QPX", buf, strlen("QPX"))) + mode = AMDGPU_QPX_PARTITION_MODE; + else if (!strncasecmp("CPX", buf, strlen("CPX"))) + mode = AMDGPU_CPX_PARTITION_MODE; + else + return -EINVAL; + + r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg); + + if (r) + return r; + + xcp_cfg->mode = mode; + return size; +} + +static struct kobj_attribute xcp_cfg_sysfs_mode = + __ATTR_RW_MODE(xcp_config, 0644); + +static void xcp_cfg_sysfs_release(struct kobject *kobj) +{ + struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj); + + kfree(xcp_cfg); +} + +static const struct kobj_type xcp_cfg_sysfs_ktype = { + .release = xcp_cfg_sysfs_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct kobj_attribute supp_part_sysfs_mode = + __ATTR_RO(supported_xcp_configs); + +static struct kobj_attribute supp_nps_sysfs_mode = + __ATTR_RO(supported_nps_configs); + +static const struct attribute *xcp_attrs[] = { + &supp_part_sysfs_mode.attr, + &xcp_cfg_sysfs_mode.attr, + NULL, +}; + +void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev) +{ + struct amdgpu_xcp_res_details *xcp_res; + struct amdgpu_xcp_cfg *xcp_cfg; + int i, r, j, rid, mode; + + if (!adev->xcp_mgr) + return; + + xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL); + if (!xcp_cfg) + return; + xcp_cfg->xcp_mgr = adev->xcp_mgr; + + r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype, + &adev->dev->kobj, "compute_partition_config"); + if (r) + goto err1; + + r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs); + if (r) + goto err1; + + if (adev->gmc.supported_nps_modes != 0) { + r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + if (r) { + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + goto err1; + } + } + + mode = (xcp_cfg->xcp_mgr->mode == + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ? 
+ AMDGPU_SPX_PARTITION_MODE : + xcp_cfg->xcp_mgr->mode; + r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg); + if (r) { + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + goto err1; + } + + xcp_cfg->mode = mode; + for (i = 0; i < xcp_cfg->num_res; i++) { + xcp_res = &xcp_cfg->xcp_res[i]; + rid = xcp_res->id; + r = kobject_init_and_add(&xcp_res->kobj, + &xcp_cfg_res_sysfs_ktype, + &xcp_cfg->kobj, "%s", + xcp_res_names[rid]); + if (r) + goto err; + } + + adev->xcp_mgr->xcp_cfg = xcp_cfg; + return; +err: + for (j = 0; j < i; j++) { + xcp_res = &xcp_cfg->xcp_res[j]; + kobject_put(&xcp_res->kobj); + } + + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); +err1: + kobject_put(&xcp_cfg->kobj); +} + +void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) +{ + struct amdgpu_xcp_res_details *xcp_res; + struct amdgpu_xcp_cfg *xcp_cfg; + int i; + + if (!adev->xcp_mgr) + return; + + xcp_cfg = adev->xcp_mgr->xcp_cfg; + for (i = 0; i < xcp_cfg->num_res; i++) { + xcp_res = &xcp_cfg->xcp_res[i]; + kobject_put(&xcp_res->kobj); + } + + sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr); + sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); + kobject_put(&xcp_cfg->kobj); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 32775260556f..b63f53242c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -56,6 +56,30 @@ enum AMDGPU_XCP_STATE { AMDGPU_XCP_RESUME, }; +enum amdgpu_xcp_res_id { + AMDGPU_XCP_RES_XCC, + AMDGPU_XCP_RES_DMA, + AMDGPU_XCP_RES_DEC, + AMDGPU_XCP_RES_JPEG, + AMDGPU_XCP_RES_MAX, +}; + +struct amdgpu_xcp_res_details { + enum amdgpu_xcp_res_id id; + u8 num_inst; + u8 num_shared; + struct kobject kobj; +}; + +struct amdgpu_xcp_cfg { + u8 mode; + struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX]; + u8 num_res; + struct amdgpu_xcp_mgr *xcp_mgr; + struct kobject kobj; + u16 compatible_nps_modes; +}; + struct amdgpu_xcp_ip_funcs { int (*prepare_suspend)(void *handle, uint32_t inst_mask); int (*suspend)(void *handle, uint32_t inst_mask); @@ -97,6 +121,9 @@ struct amdgpu_xcp_mgr { /* Used to determine KFD memory size limits per XCP */ unsigned int num_xcp_per_mem_partition; + struct amdgpu_xcp_cfg *xcp_cfg; + uint32_t supp_xcp_modes; + uint32_t avail_xcp_modes; }; struct amdgpu_xcp_mgr_funcs { @@ -108,7 +135,9 @@ struct amdgpu_xcp_mgr_funcs { struct amdgpu_xcp_ip *ip); int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr, struct amdgpu_xcp *xcp, uint8_t *mem_id); - + int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg); int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -146,6 +175,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, void amdgpu_xcp_release_sched(struct amdgpu_device *adev, struct amdgpu_ctx_entity *entity); +void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev); +void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev); + #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ (adev)->xcp_mgr->funcs->select_scheds ?
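
(On the error path of amdgpu_xcp_cfg_sysfs_init above: the unwind loop must index with j, the loop counter, not i, or every iteration would put the same kobject. The rule it follows is that once kobject_init_and_add() has run for an object, its refcount is live and teardown has to go through kobject_put(), including for the object whose add just failed. A minimal sketch of that discipline, with placeholder types and ktype:)

#include <linux/kobject.h>

struct child {
	struct kobject kobj;
};

extern const struct kobj_type child_ktype;	/* placeholder ktype with a release() */

static int add_children(struct kobject *parent, struct child *c, int n)
{
	int i, j, r;

	for (i = 0; i < n; i++) {
		r = kobject_init_and_add(&c[i].kobj, &child_ktype,
					 parent, "child%d", i);
		if (r) {
			/* Even the failed kobject was initialized, so it
			 * still needs a put before unwinding the others. */
			kobject_put(&c[i].kobj);
			goto undo;
		}
	}
	return 0;

undo:
	for (j = 0; j < i; j++)		/* index with j, not i */
		kobject_put(&c[j].kobj);
	return r;
}
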
\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7de449fae1e3..b47422b0b5b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -667,6 +667,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) task_barrier_init(&hive->tb); hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; hive->hi_req_gpu = NULL; + atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); /* * hive pstate on boot is high in vega20 so we have to go to low @@ -800,6 +801,23 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, return -EINVAL; } +bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i; + + /* Sharing should always be enabled for non-SRIOV. */ + if (!amdgpu_sriov_vf(adev)) + return true; + + for (i = 0 ; i < top->num_nodes; ++i) + if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) + return !!top->nodes[i].is_sharing_enabled; + + return false; +} + /* * Devices that support extended data require the entire hive to initialize with * the shared memory buffer flag set. @@ -860,8 +878,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { ret = psp_xgmi_initialize(&adev->psp, false, true); if (ret) { dev_err(adev->dev, @@ -907,8 +924,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) task_barrier_add_task(&hive->tb); - if (!adev->gmc.xgmi.pending_reset && - amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { /* update node list for other device in the hive */ if (tmp_adev != adev) { @@ -985,7 +1001,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } } - if (!ret && !adev->gmc.xgmi.pending_reset) + if (!ret) ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); exit_unlock: @@ -1500,3 +1516,117 @@ int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev) return 0; } + +static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work) +{ + struct amdgpu_hive_info *hive = + container_of(work, struct amdgpu_hive_info, reset_on_init_work); + struct amdgpu_reset_context reset_context; + struct amdgpu_device *tmp_adev; + struct list_head device_list; + int r; + + mutex_lock(&hive->hive_lock); + + INIT_LIST_HEAD(&device_list); + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + list_add_tail(&tmp_adev->reset_list, &device_list); + + tmp_adev = list_first_entry(&device_list, struct amdgpu_device, + reset_list); + amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); + + reset_context.method = AMD_RESET_METHOD_ON_INIT; + reset_context.reset_req_dev = tmp_adev; + reset_context.hive = hive; + reset_context.reset_device_list = &device_list; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); + + amdgpu_reset_do_xgmi_reset_on_init(&reset_context); + mutex_unlock(&hive->hive_lock); + amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); + + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + r = amdgpu_ras_init_badpage_info(tmp_adev); + if (r && r != -EHWPOISON) + 
dev_err(tmp_adev->dev, + "error during bad page data initialization"); + } +} + +static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive) +{ + INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work); + amdgpu_reset_domain_schedule(hive->reset_domain, + &hive->reset_on_init_work); +} + +int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev) +{ + struct amdgpu_hive_info *hive; + bool reset_scheduled; + int num_devs; + + hive = amdgpu_get_xgmi_hive(adev); + if (!hive) + return -EINVAL; + + mutex_lock(&hive->hive_lock); + num_devs = atomic_read(&hive->number_devices); + reset_scheduled = false; + if (num_devs == adev->gmc.xgmi.num_physical_nodes) { + amdgpu_xgmi_schedule_reset_on_init(hive); + reset_scheduled = true; + } + + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); + + if (reset_scheduled) + flush_work(&hive->reset_on_init_work); + + return 0; +} + +int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, + int req_nps_mode) +{ + struct amdgpu_device *tmp_adev; + int cur_nps_mode, r = 0; + + /* This is expected to be called only during unload of the driver. The + * request needs to be placed only once for all devices in the hive. If + * one of them fails, revert the request for previous successful devices. + * After placing the request, mark the hive mode as UNKNOWN so that other + * devices don't request anymore. + */ + mutex_lock(&hive->hive_lock); + if (atomic_read(&hive->requested_nps_mode) == + UNKNOWN_MEMORY_PARTITION_MODE) { + dev_dbg(adev->dev, "Unexpected entry for hive NPS change"); + mutex_unlock(&hive->hive_lock); + return 0; + } + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + r = adev->gmc.gmc_funcs->request_mem_partition_mode( + tmp_adev, req_nps_mode); + if (r) + break; + } + if (r) { + /* Request back current mode if one of the requests failed */ + cur_nps_mode = + adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev); + list_for_each_entry_continue_reverse( + tmp_adev, &hive->device_list, gmc.xgmi.head) + adev->gmc.gmc_funcs->request_mem_partition_mode( + tmp_adev, cur_nps_mode); + } + /* Set to UNKNOWN so that other devices don't request anymore */ + atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE); + mutex_unlock(&hive->hive_lock); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index a3bfc16de6d4..8cc7ab38db7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -45,6 +45,8 @@ struct amdgpu_hive_info { struct amdgpu_reset_domain *reset_domain; atomic_t ras_recovery; struct ras_event_manager event_mgr; + struct work_struct reset_on_init_work; + atomic_t requested_nps_mode; }; struct amdgpu_pcs_ras_field { @@ -64,6 +66,8 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev); uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, @@ -75,5 +79,10 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id); } int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev); + +int
amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, + int req_nps_mode); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 6e9eeaeb3de1..b4f9c2f4e92c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -28,17 +28,21 @@ #define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 #define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB #define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 - +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 +#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 /* * layout - * 0 64KB 65KB 66KB - * | VBIOS | PF2VF | VF2PF | Bad Page | ... - * | 64KB | 1KB | 1KB | + * 0 64KB 65KB 66KB 68KB 132KB + * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 64KB | 1KB | 1KB | 2KB | 64KB | ... */ + #define AMD_SRIOV_MSG_SIZE_KB 1 #define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB #define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) #define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) /* * PF2VF history log: @@ -86,30 +90,59 @@ enum amd_sriov_ucode_engine_id { union amd_sriov_msg_feature_flags { struct { - uint32_t error_log_collect : 1; - uint32_t host_load_ucodes : 1; - uint32_t host_flr_vramlost : 1; - uint32_t mm_bw_management : 1; - uint32_t pp_one_vf_mode : 1; - uint32_t reg_indirect_acc : 1; - uint32_t av1_support : 1; - uint32_t vcn_rb_decouple : 1; - uint32_t mes_info_enable : 1; - uint32_t reserved : 23; + uint32_t error_log_collect : 1; + uint32_t host_load_ucodes : 1; + uint32_t host_flr_vramlost : 1; + uint32_t mm_bw_management : 1; + uint32_t pp_one_vf_mode : 1; + uint32_t reg_indirect_acc : 1; + uint32_t av1_support : 1; + uint32_t vcn_rb_decouple : 1; + uint32_t mes_info_dump_enable : 1; + uint32_t ras_caps : 1; + uint32_t ras_telemetry : 1; + uint32_t reserved : 21; } flags; uint32_t all; }; union amd_sriov_reg_access_flags { struct { - uint32_t vf_reg_access_ih : 1; - uint32_t vf_reg_access_mmhub : 1; - uint32_t vf_reg_access_gc : 1; - uint32_t reserved : 29; + uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_mmhub : 1; + uint32_t vf_reg_access_gc : 1; + uint32_t reserved : 29; } flags; uint32_t all; }; +union amd_sriov_ras_caps { + struct { + uint64_t block_umc : 1; + uint64_t block_sdma : 1; + uint64_t block_gfx : 1; + uint64_t block_mmhub : 1; + uint64_t block_athub : 1; + uint64_t block_pcie_bif : 1; + uint64_t block_hdp : 1; + uint64_t block_xgmi_wafl : 1; + uint64_t block_df : 1; + uint64_t block_smn : 1; + uint64_t block_sem : 1; + uint64_t block_mp0 : 1; + uint64_t block_mp1 : 1; + uint64_t block_fuse : 1; + uint64_t block_mca : 1; + uint64_t block_vcn : 1; + uint64_t block_jpeg : 1; + uint64_t block_ih : 1; + uint64_t block_mpio : 1; + uint64_t poison_propogation_mode : 1; + uint64_t reserved : 44; + } bits; + uint64_t all; +}; + union amd_sriov_msg_os_info { struct { uint32_t windows : 1; @@ -158,7 +191,7 @@ struct amd_sriov_msg_pf2vf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49) +#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55) struct amd_sriov_msg_pf2vf_info { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; @@ -211,6 +244,12 @@ struct 
amd_sriov_msg_pf2vf_info { uint32_t pcie_atomic_ops_support_flags; /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */ uint32_t gpu_capacity; + /* vf bdf on host pci tree for debug only */ + uint32_t bdf_on_host; + uint32_t more_bp; //Reserved for future use. + union amd_sriov_ras_caps ras_en_caps; + union amd_sriov_ras_caps ras_telemetry_en_caps; + /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; } __packed; @@ -283,8 +322,12 @@ enum amd_sriov_mailbox_request_message { MB_REQ_MSG_REL_GPU_FINI_ACCESS, MB_REQ_MSG_REQ_GPU_RESET_ACCESS, MB_REQ_MSG_REQ_GPU_INIT_DATA, + MB_REQ_MSG_PSP_VF_CMD_RELAY, MB_REQ_MSG_LOG_VF_ERROR = 200, + MB_REQ_MSG_READY_TO_RESET = 201, + MB_REQ_MSG_RAS_POISON = 202, + MB_REQ_RAS_ERROR_COUNT = 203, }; /* mailbox message send from host to guest */ @@ -297,10 +340,60 @@ enum amd_sriov_mailbox_response_message { MB_RES_MSG_FAIL, MB_RES_MSG_QUERY_ALIVE, MB_RES_MSG_GPU_INIT_DATA_READY, + MB_RES_MSG_RAS_ERROR_COUNT_READY = 11, MB_RES_MSG_TEXT_MESSAGE = 255 }; +enum amd_sriov_ras_telemetry_gpu_block { + RAS_TELEMETRY_GPU_BLOCK_UMC = 0, + RAS_TELEMETRY_GPU_BLOCK_SDMA = 1, + RAS_TELEMETRY_GPU_BLOCK_GFX = 2, + RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3, + RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4, + RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5, + RAS_TELEMETRY_GPU_BLOCK_HDP = 6, + RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7, + RAS_TELEMETRY_GPU_BLOCK_DF = 8, + RAS_TELEMETRY_GPU_BLOCK_SMN = 9, + RAS_TELEMETRY_GPU_BLOCK_SEM = 10, + RAS_TELEMETRY_GPU_BLOCK_MP0 = 11, + RAS_TELEMETRY_GPU_BLOCK_MP1 = 12, + RAS_TELEMETRY_GPU_BLOCK_FUSE = 13, + RAS_TELEMETRY_GPU_BLOCK_MCA = 14, + RAS_TELEMETRY_GPU_BLOCK_VCN = 15, + RAS_TELEMETRY_GPU_BLOCK_JPEG = 16, + RAS_TELEMETRY_GPU_BLOCK_IH = 17, + RAS_TELEMETRY_GPU_BLOCK_MPIO = 18, + RAS_TELEMETRY_GPU_BLOCK_COUNT = 19, +}; + +struct amd_sriov_ras_telemetry_header { + uint32_t checksum; + uint32_t used_size; + uint32_t reserved[2]; +}; + +struct amd_sriov_ras_telemetry_error_count { + struct { + uint32_t ce_count; + uint32_t ue_count; + uint32_t de_count; + uint32_t ce_overflow_count; + uint32_t ue_overflow_count; + uint32_t de_overflow_count; + uint32_t reserved[6]; + } block[RAS_TELEMETRY_GPU_BLOCK_COUNT]; +}; + +struct amdsriov_ras_telemetry { + struct amd_sriov_ras_telemetry_header header; + + union { + struct amd_sriov_ras_telemetry_error_count error_count; + } body; +}; + /* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */ enum amd_sriov_gpu_init_data_version { GPU_INIT_DATA_READY_V1 = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index ccfd2a4b4acc..e157d6d857b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -447,6 +447,72 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x return 0; } +static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int max_res[AMDGPU_XCP_RES_MAX] = {}; + bool res_lt_xcp; + int num_xcp, i; + u16 nps_modes; + + if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + return -EINVAL; + + max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); + max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; + max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; + max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcp 
= 1; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcp = 2; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcp = 3; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcp = 4; + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcp = NUM_XCC(adev->gfx.xcc_mask); + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + default: + return -EINVAL; + } + + xcp_cfg->compatible_nps_modes = + (adev->gmc.supported_nps_modes & nps_modes); + xcp_cfg->num_res = ARRAY_SIZE(max_res); + + for (i = 0; i < xcp_cfg->num_res; i++) { + res_lt_xcp = max_res[i] < num_xcp; + xcp_cfg->xcp_res[i].id = i; + xcp_cfg->xcp_res[i].num_inst = + res_lt_xcp ? 1 : max_res[i] / num_xcp; + xcp_cfg->xcp_res[i].num_inst = + i == AMDGPU_XCP_RES_JPEG ? + xcp_cfg->xcp_res[i].num_inst * + adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst; + xcp_cfg->xcp_res[i].num_shared = + res_lt_xcp ? num_xcp / max_res[i] : 1; + } + + return 0; +} + static enum amdgpu_gfx_partition __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) { @@ -530,6 +596,57 @@ static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, return ret; } +static void +__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + + xcp_mgr->supp_xcp_modes = 0; + + switch (NUM_XCC(adev->gfx.xcc_mask)) { + case 8: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_QPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 6: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_TPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 4: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + /* this seems only existing in emulation phase */ + case 2: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 1: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + + default: + break; + } +} + +static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + int mode; + + xcp_mgr->avail_xcp_modes = 0; + + for_each_inst(mode, xcp_mgr->supp_xcp_modes) { + if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + xcp_mgr->avail_xcp_modes |= BIT(mode); + } +} + static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode, int *num_xcps) { @@ -578,6 +695,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + if (!ret) + __aqua_vanjaram_update_available_partition_mode(xcp_mgr); unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); @@ -656,9 +775,11 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, + .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, .select_scheds = 
&aqua_vanjaram_select_scheds, - .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list + .update_partition_sched_list = + &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -673,6 +794,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; + __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index cf1d5d462b67..e2cb1f080e88 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1985,9 +1985,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .query_video_codecs = &cik_query_video_codecs, }; -static int cik_common_early_init(void *handle) +static int cik_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &cik_smc_rreg; adev->smc_wreg = &cik_smc_wreg; @@ -2124,19 +2124,9 @@ static int cik_common_early_init(void *handle) return 0; } -static int cik_common_sw_init(void *handle) +static int cik_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int cik_common_sw_fini(void *handle) -{ - return 0; -} - -static int cik_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ cik_init_golden_registers(adev); @@ -2148,23 +2138,14 @@ static int cik_common_hw_init(void *handle) return 0; } -static int cik_common_hw_fini(void *handle) +static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int cik_common_suspend(void *handle) +static int cik_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_fini(adev); -} - -static int cik_common_resume(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_common_hw_init(adev); + return cik_common_hw_init(ip_block); } static bool cik_common_is_idle(void *handle) @@ -2172,12 +2153,9 @@ static bool cik_common_is_idle(void *handle) return true; } -static int cik_common_wait_for_idle(void *handle) -{ - return 0; -} -static int cik_common_soft_reset(void *handle) + +static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX hard reset?? 
*/ return 0; @@ -2198,20 +2176,13 @@ static int cik_common_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_common_ip_funcs = { .name = "cik_common", .early_init = cik_common_early_init, - .late_init = NULL, - .sw_init = cik_common_sw_init, - .sw_fini = cik_common_sw_fini, .hw_init = cik_common_hw_init, .hw_fini = cik_common_hw_fini, - .suspend = cik_common_suspend, .resume = cik_common_resume, .is_idle = cik_common_is_idle, - .wait_for_idle = cik_common_wait_for_idle, .soft_reset = cik_common_soft_reset, .set_clockgating_state = cik_common_set_clockgating_state, .set_powergating_state = cik_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version cik_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 576baa9dbb0e..1da17755ad53 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -283,9 +283,9 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cik_ih_early_init(void *handle) +static int cik_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int cik_ih_early_init(void *handle) return 0; } -static int cik_ih_sw_init(void *handle) +static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -311,9 +311,9 @@ static int cik_ih_sw_init(void *handle) return r; } -static int cik_ih_sw_fini(void *handle) +static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -321,34 +321,28 @@ static int cik_ih_sw_fini(void *handle) return 0; } -static int cik_ih_hw_init(void *handle) +static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return cik_ih_irq_init(adev); } -static int cik_ih_hw_fini(void *handle) +static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_ih_irq_disable(adev); + cik_ih_irq_disable(ip_block->adev); return 0; } -static int cik_ih_suspend(void *handle) +static int cik_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_fini(adev); + return cik_ih_hw_fini(ip_block); } -static int cik_ih_resume(void *handle) +static int cik_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_ih_hw_init(adev); + return cik_ih_hw_init(ip_block); } static bool cik_ih_is_idle(void *handle) @@ -362,11 +356,11 @@ static bool cik_ih_is_idle(void *handle) return true; } -static int cik_ih_wait_for_idle(void *handle) +static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -378,9 
+372,9 @@ static int cik_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_ih_soft_reset(void *handle) +static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -423,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_ih_ip_funcs = { .name = "cik_ih", .early_init = cik_ih_early_init, - .late_init = NULL, .sw_init = cik_ih_sw_init, .sw_fini = cik_ih_sw_fini, .hw_init = cik_ih_hw_init, @@ -435,8 +428,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { .soft_reset = cik_ih_soft_reset, .set_clockgating_state = cik_ih_set_clockgating_state, .set_powergating_state = cik_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cik_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 952737de9411..ede1a028d48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -54,7 +54,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); -static int cik_sdma_soft_reset(void *handle); +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin"); MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin"); @@ -918,9 +918,9 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, } } -static int cik_sdma_early_init(void *handle) +static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -937,10 +937,10 @@ static int cik_sdma_early_init(void *handle) return 0; } -static int cik_sdma_sw_init(void *handle) +static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, i; /* SDMA trap event */ @@ -977,9 +977,9 @@ static int cik_sdma_sw_init(void *handle) return r; } -static int cik_sdma_sw_fini(void *handle) +static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -989,10 +989,10 @@ static int cik_sdma_sw_fini(void *handle) return 0; } -static int cik_sdma_hw_init(void *handle) +static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = cik_sdma_start(adev); if (r) @@ -1001,9 +1001,9 @@ static int cik_sdma_hw_init(void *handle) return r; } -static int cik_sdma_hw_fini(void *handle) +static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); @@ -1011,20 +1011,16 @@ static int cik_sdma_hw_fini(void *handle) return 0; } -static int cik_sdma_suspend(void *handle) +static int 
cik_sdma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cik_sdma_hw_fini(adev); + return cik_sdma_hw_fini(ip_block); } -static int cik_sdma_resume(void *handle) +static int cik_sdma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cik_sdma_soft_reset(handle); + cik_sdma_soft_reset(ip_block); - return cik_sdma_hw_init(adev); + return cik_sdma_hw_init(ip_block); } static bool cik_sdma_is_idle(void *handle) @@ -1039,11 +1035,11 @@ static bool cik_sdma_is_idle(void *handle) return true; } -static int cik_sdma_wait_for_idle(void *handle) +static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1056,10 +1052,10 @@ static int cik_sdma_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cik_sdma_soft_reset(void *handle) +static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; /* sdma0 */ @@ -1217,7 +1213,6 @@ static int cik_sdma_set_powergating_state(void *handle, static const struct amd_ip_funcs cik_sdma_ip_funcs = { .name = "cik_sdma", .early_init = cik_sdma_early_init, - .late_init = NULL, .sw_init = cik_sdma_sw_init, .sw_fini = cik_sdma_sw_fini, .hw_init = cik_sdma_hw_init, @@ -1229,8 +1224,6 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = { .soft_reset = cik_sdma_soft_reset, .set_clockgating_state = cik_sdma_set_clockgating_state, .set_powergating_state = cik_sdma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 072643787384..d72973bd570d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -274,9 +274,9 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int cz_ih_early_init(void *handle) +static int cz_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -288,10 +288,10 @@ static int cz_ih_early_init(void *handle) return 0; } -static int cz_ih_sw_init(void *handle) +static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -302,9 +302,9 @@ static int cz_ih_sw_init(void *handle) return r; } -static int cz_ih_sw_fini(void *handle) +static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -312,10 +312,10 @@ static int cz_ih_sw_fini(void *handle) return 0; } -static int cz_ih_hw_init(void *handle) +static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; r = cz_ih_irq_init(adev); if (r) @@ -324,27 +324,21 @@ static int cz_ih_hw_init(void *handle) return 0; } -static int cz_ih_hw_fini(void *handle) +static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cz_ih_irq_disable(adev); + cz_ih_irq_disable(ip_block->adev); return 0; } -static int cz_ih_suspend(void *handle) +static int cz_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_fini(adev); + return cz_ih_hw_fini(ip_block); } -static int cz_ih_resume(void *handle) +static int cz_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return cz_ih_hw_init(adev); + return cz_ih_hw_init(ip_block); } static bool cz_ih_is_idle(void *handle) @@ -358,11 +352,11 @@ static bool cz_ih_is_idle(void *handle) return true; } -static int cz_ih_wait_for_idle(void *handle) +static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -374,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int cz_ih_soft_reset(void *handle) +static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -421,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs cz_ih_ip_funcs = { .name = "cz_ih", .early_init = cz_ih_early_init, - .late_init = NULL, .sw_init = cz_ih_sw_init, .sw_fini = cz_ih_sw_fini, .hw_init = cz_ih_hw_init, @@ -433,8 +426,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { .soft_reset = cz_ih_soft_reset, .set_clockgating_state = cz_ih_set_clockgating_state, .set_powergating_state = cz_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cz_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 70c1399f738d..5098c50d54c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2738,9 +2738,9 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v10_0_early_init(void *handle) +static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; @@ -2765,10 +2765,10 @@ static int dce_v10_0_early_init(void *handle) return 0; } -static int dce_v10_0_sw_init(void *handle) +static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2844,9 +2844,9 @@ static int dce_v10_0_sw_init(void *handle) return 0; } -static int dce_v10_0_sw_fini(void *handle) +static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2862,10 +2862,10 @@ static int dce_v10_0_sw_fini(void *handle) return 0; } -static int dce_v10_0_hw_init(void *handle) +static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_init_golden_registers(adev); @@ -2887,10 +2887,10 @@ static int dce_v10_0_hw_init(void *handle) return 0; } -static int dce_v10_0_hw_fini(void *handle) +static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v10_0_hpd_fini(adev); @@ -2905,9 +2905,9 @@ static int dce_v10_0_hw_fini(void *handle) return 0; } -static int dce_v10_0_suspend(void *handle) +static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2917,18 +2917,18 @@ static int dce_v10_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v10_0_hw_fini(handle); + return dce_v10_0_hw_fini(ip_block); } -static int dce_v10_0_resume(void *handle) +static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v10_0_hw_init(handle); + ret = dce_v10_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2948,22 +2948,17 @@ static bool dce_v10_0_is_idle(void *handle) return true; } -static int dce_v10_0_wait_for_idle(void *handle) +static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static bool dce_v10_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return dce_v10_0_is_display_hung(adev); } -static int dce_v10_0_soft_reset(void *handle) +static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3322,7 +3317,6 @@ static int dce_v10_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .name = "dce_v10_0", .early_init = dce_v10_0_early_init, - .late_init = NULL, .sw_init = dce_v10_0_sw_init, .sw_fini = dce_v10_0_sw_fini, .hw_init = dce_v10_0_hw_init, @@ -3330,13 +3324,10 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .suspend = dce_v10_0_suspend, .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, - .wait_for_idle = dce_v10_0_wait_for_idle, .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f154c24499c8..c5680ff4ab9f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2851,9 +2851,9 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v11_0_early_init(void *handle) +static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; @@ -2891,10 +2891,10 @@ static int dce_v11_0_early_init(void *handle) return 0; } -static int dce_v11_0_sw_init(void *handle) +static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2971,9 +2971,9 @@ static int dce_v11_0_sw_init(void *handle) return 0; } -static int dce_v11_0_sw_fini(void *handle) +static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2989,10 +2989,10 @@ static int dce_v11_0_sw_fini(void *handle) return 0; } -static int dce_v11_0_hw_init(void *handle) +static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_init_golden_registers(adev); @@ -3025,10 +3025,10 @@ static int dce_v11_0_hw_init(void *handle) return 0; } -static int dce_v11_0_hw_fini(void *handle) +static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v11_0_hpd_fini(adev); @@ -3043,9 +3043,9 @@ static int dce_v11_0_hw_fini(void *handle) return 0; } -static int dce_v11_0_suspend(void *handle) +static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -3055,18 +3055,18 @@ static int dce_v11_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v11_0_hw_fini(handle); + return dce_v11_0_hw_fini(ip_block); } -static int dce_v11_0_resume(void *handle) +static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v11_0_hw_init(handle); + ret = dce_v11_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -3086,15 +3086,10 @@ static bool dce_v11_0_is_idle(void *handle) return true; } -static int dce_v11_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v11_0_soft_reset(void *handle) +static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v11_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3454,7 +3449,6 @@ static int 
dce_v11_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .name = "dce_v11_0", .early_init = dce_v11_0_early_init, - .late_init = NULL, .sw_init = dce_v11_0_sw_init, .sw_fini = dce_v11_0_sw_fini, .hw_init = dce_v11_0_hw_init, @@ -3462,12 +3456,9 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .suspend = dce_v11_0_suspend, .resume = dce_v11_0_resume, .is_idle = dce_v11_0_is_idle, - .wait_for_idle = dce_v11_0_wait_for_idle, .soft_reset = dce_v11_0_soft_reset, .set_clockgating_state = dce_v11_0_set_clockgating_state, .set_powergating_state = dce_v11_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index a7fcb135827f..eb7de9122d99 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2633,9 +2633,9 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v6_0_early_init(void *handle) +static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; @@ -2664,11 +2664,11 @@ static int dce_v6_0_early_init(void *handle) return 0; } -static int dce_v6_0_sw_init(void *handle) +static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; bool ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2743,9 +2743,9 @@ static int dce_v6_0_sw_init(void *handle) return r; } -static int dce_v6_0_sw_fini(void *handle) +static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2760,10 +2760,10 @@ static int dce_v6_0_sw_fini(void *handle) return 0; } -static int dce_v6_0_hw_init(void *handle) +static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v6_0_set_vga_render_state(adev, false); @@ -2783,10 +2783,10 @@ static int dce_v6_0_hw_init(void *handle) return 0; } -static int dce_v6_0_hw_fini(void *handle) +static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v6_0_hpd_fini(adev); @@ -2801,9 +2801,9 @@ static int dce_v6_0_hw_fini(void *handle) return 0; } -static int dce_v6_0_suspend(void *handle) +static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2812,18 +2812,18 @@ static int dce_v6_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v6_0_hw_fini(handle); + return dce_v6_0_hw_fini(ip_block); } -static int dce_v6_0_resume(void *handle) +static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = 
(struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v6_0_hw_init(handle); + ret = dce_v6_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2843,12 +2843,7 @@ static bool dce_v6_0_is_idle(void *handle) return true; } -static int dce_v6_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v6_0_soft_reset(void *handle) +static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); return 0; @@ -3144,7 +3139,6 @@ static int dce_v6_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .name = "dce_v6_0", .early_init = dce_v6_0_early_init, - .late_init = NULL, .sw_init = dce_v6_0_sw_init, .sw_fini = dce_v6_0_sw_fini, .hw_init = dce_v6_0_hw_init, @@ -3152,12 +3146,9 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .suspend = dce_v6_0_suspend, .resume = dce_v6_0_resume, .is_idle = dce_v6_0_is_idle, - .wait_for_idle = dce_v6_0_wait_for_idle, .soft_reset = dce_v6_0_soft_reset, .set_clockgating_state = dce_v6_0_set_clockgating_state, .set_powergating_state = dce_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 77ac3f114d24..04b79ff87f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2644,9 +2644,9 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v8_0_early_init(void *handle) +static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; @@ -2680,10 +2680,10 @@ static int dce_v8_0_early_init(void *handle) return 0; } -static int dce_v8_0_sw_init(void *handle) +static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); @@ -2764,9 +2764,9 @@ static int dce_v8_0_sw_init(void *handle) return 0; } -static int dce_v8_0_sw_fini(void *handle) +static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; drm_edid_free(adev->mode_info.bios_hardcoded_edid); @@ -2782,10 +2782,10 @@ static int dce_v8_0_sw_fini(void *handle) return 0; } -static int dce_v8_0_hw_init(void *handle) +static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* disable vga render */ dce_v8_0_set_vga_render_state(adev, false); @@ -2805,10 +2805,10 @@ static int dce_v8_0_hw_init(void *handle) return 0; } -static int dce_v8_0_hw_fini(void *handle) +static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; dce_v8_0_hpd_fini(adev); @@ -2823,9 +2823,9 @@ static int dce_v8_0_hw_fini(void *handle) return 0; 
} -static int dce_v8_0_suspend(void *handle) +static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_display_suspend_helper(adev); @@ -2835,18 +2835,18 @@ static int dce_v8_0_suspend(void *handle) adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - return dce_v8_0_hw_fini(handle); + return dce_v8_0_hw_fini(ip_block); } -static int dce_v8_0_resume(void *handle) +static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, adev->mode_info.bl_level); - ret = dce_v8_0_hw_init(handle); + ret = dce_v8_0_hw_init(ip_block); /* turn on the BL */ if (adev->mode_info.bl_encoder) { @@ -2866,15 +2866,10 @@ static bool dce_v8_0_is_idle(void *handle) return true; } -static int dce_v8_0_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_v8_0_soft_reset(void *handle) +static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (dce_v8_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3232,7 +3227,6 @@ static int dce_v8_0_set_powergating_state(void *handle, static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .name = "dce_v8_0", .early_init = dce_v8_0_early_init, - .late_init = NULL, .sw_init = dce_v8_0_sw_init, .sw_fini = dce_v8_0_sw_fini, .hw_init = dce_v8_0_hw_init, @@ -3240,12 +3234,9 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .suspend = dce_v8_0_suspend, .resume = dce_v8_0_resume, .is_idle = dce_v8_0_is_idle, - .wait_for_idle = dce_v8_0_wait_for_idle, .soft_reset = dce_v8_0_soft_reset, .set_clockgating_state = dce_v8_0_set_clockgating_state, .set_powergating_state = dce_v8_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 45ed97038df0..24dce803a829 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3677,13 +3677,19 @@ static int gfx_v10_0_set_powergating_state(void *handle, enum amd_powergating_state state); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -4683,11 +4689,11 @@ static void 
gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v10_0_sw_init(void *handle) +static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(10, 1, 10): @@ -4726,6 +4732,11 @@ static int gfx_v10_0_sw_init(void *handle) adev->gfx.mec.num_queue_per_pipe = 8; break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } /* KIQ event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, @@ -4814,6 +4825,11 @@ static int gfx_v10_0_sw_init(void *handle) } } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -4842,6 +4858,10 @@ static int gfx_v10_0_sw_init(void *handle) gfx_v10_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -4866,10 +4886,10 @@ static void gfx_v10_0_me_fini(struct amdgpu_device *adev) (void **)&adev->gfx.me.me_fw_ptr); } -static int gfx_v10_0_sw_fini(void *handle) +static int gfx_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4881,6 +4901,8 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v10_0_pfp_fini(adev); gfx_v10_0_ce_fini(adev); gfx_v10_0_me_fini(adev); @@ -4891,6 +4913,7 @@ static int gfx_v10_0_sw_fini(void *handle) gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev); gfx_v10_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -6374,7 +6397,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -6412,7 +6435,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -7366,14 +7389,17 @@ static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data); } -static int gfx_v10_0_hw_init(void *handle) +static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { /** * For gfx 10, rlc firmware loading relies on smu firmware is @@ -7418,9 +7444,9 @@ static int gfx_v10_0_hw_init(void *handle) return r; } -static int gfx_v10_0_hw_fini(void *handle) +static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -7431,7 +7457,7 @@ static int gfx_v10_0_hw_fini(void *handle) * otherwise the gfxoff disallowing will be failed to set. */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) - gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); + gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -7456,14 +7482,14 @@ static int gfx_v10_0_hw_fini(void *handle) return 0; } -static int gfx_v10_0_suspend(void *handle) +static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_fini(handle); + return gfx_v10_0_hw_fini(ip_block); } -static int gfx_v10_0_resume(void *handle) +static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v10_0_hw_init(handle); + return gfx_v10_0_hw_init(ip_block); } static bool gfx_v10_0_is_idle(void *handle) @@ -7477,11 +7503,11 @@ static bool gfx_v10_0_is_idle(void *handle) return true; } -static int gfx_v10_0_wait_for_idle(void *handle) +static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -7495,11 +7521,11 @@ static int gfx_v10_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v10_0_soft_reset(void *handle) +static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -7678,9 +7704,9 @@ static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v10_0_early_init(void *handle) +static int gfx_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v10_0_gfx_funcs; @@ -7722,9 +7748,9 @@ static int gfx_v10_0_early_init(void *handle) return gfx_v10_0_init_microcode(adev); } -static int gfx_v10_0_late_init(void *handle) +static int gfx_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -9402,8 +9428,6 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -9414,8 +9438,7 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, 
uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -9568,9 +9591,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v10_ip_print(void *handle, struct drm_printer *p) +static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9632,9 +9655,9 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v10_ip_dump(void *handle) +static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); @@ -9699,6 +9722,13 @@ static void gfx_v10_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .name = "gfx_v10_0", .early_init = gfx_v10_0_early_init, @@ -9749,7 +9779,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2 + /* SWITCH_BUFFER */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -9772,6 +9803,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -9791,7 +9825,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v10_0_emit_mem_sync */ + 8 + /* gfx_v10_0_emit_mem_sync */ + 2, /* gfx_v10_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -9809,6 +9844,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, + .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3e8be82a172..2ae058a224f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -46,6 +46,7 @@ #include "clearstate_gfx11.h" #include "v11_structs.h" #include "gfx_v11_0.h" +#include "gfx_v11_0_cleaner_shader.h" #include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" @@ -293,14 +294,20 @@ static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { + struct amdgpu_device *adev = kiq_ring->adev; + u64 shader_mc_addr; + + /* Cleaner shader MC address */ + shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ amdgpu_ring_write(kiq_ring, 0); /* oac mask */ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ } @@ -483,8 +490,6 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -495,8 +500,7 @@ static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) @@ -1536,11 +1540,11 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v11_0_sw_init(void *handle) +static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): @@ -1575,6 +1579,29 @@ static int gfx_v11_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 2280 && + adev->gfx.pfp_fw_version >= 2370 && + adev->gfx.mec_fw_version >= 2450 && + adev->mes.fw_version[0] >= 99) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + default: + adev->gfx.enable_cleaner_shader = false; + 
break; + } + /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) @@ -1666,6 +1693,24 @@ static int gfx_v11_0_sw_init(void *handle) } } + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + if ((adev->gfx.me_fw_version >= 2280) && + (adev->gfx.mec_fw_version >= 2410)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } + break; + default: + break; + } + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); if (r) { @@ -1700,6 +1745,10 @@ static int gfx_v11_0_sw_init(void *handle) gfx_v11_0_alloc_ip_dump(adev); + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1732,10 +1781,10 @@ static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v11_0_sw_fini(void *handle) +static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1749,6 +1798,8 @@ static int gfx_v11_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev, 0); } + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v11_0_pfp_fini(adev); gfx_v11_0_me_fini(adev); gfx_v11_0_rlc_fini(adev); @@ -1759,6 +1810,8 @@ static int gfx_v11_0_sw_fini(void *handle) gfx_v11_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -1893,8 +1946,10 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. These should be enabled by FW for target VMIDs. 
+ */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); @@ -3555,7 +3610,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3593,7 +3648,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ + /* Set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -4568,10 +4623,13 @@ static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); } -static int gfx_v11_0_hw_init(void *handle) +static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + + amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, + adev->gfx.cleaner_shader_ptr); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs) { @@ -4665,9 +4723,9 @@ static int gfx_v11_0_hw_init(void *handle) return r; } -static int gfx_v11_0_hw_fini(void *handle) +static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4703,14 +4761,14 @@ static int gfx_v11_0_hw_fini(void *handle) return 0; } -static int gfx_v11_0_suspend(void *handle) +static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_fini(handle); + return gfx_v11_0_hw_fini(ip_block); } -static int gfx_v11_0_resume(void *handle) +static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v11_0_hw_init(handle); + return gfx_v11_0_hw_init(ip_block); } static bool gfx_v11_0_is_idle(void *handle) @@ -4724,11 +4782,11 @@ static bool gfx_v11_0_is_idle(void *handle) return true; } -static int gfx_v11_0_wait_for_idle(void *handle) +static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4774,12 +4832,12 @@ int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, return 0; } -static int gfx_v11_0_soft_reset(void *handle) +static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; int r, i, j, k; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -4905,10 +4963,10 @@ static int gfx_v11_0_soft_reset(void *handle) return 
gfx_v11_0_cp_resume(adev); } -static bool gfx_v11_0_check_soft_reset(void *handle) +static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; long tmo = msecs_to_jiffies(1000); @@ -4929,12 +4987,13 @@ static bool gfx_v11_0_check_soft_reset(void *handle) return false; } -static int gfx_v11_0_post_soft_reset(void *handle) +static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; /** * GFX soft reset will impact MES, need resume MES when do GFX soft reset */ - return amdgpu_mes_resume((struct amdgpu_device *)handle); + return amdgpu_mes_resume(adev); } static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) @@ -4995,9 +5054,9 @@ static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v11_0_early_init(void *handle) +static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v11_0_gfx_funcs; @@ -5018,9 +5077,9 @@ static int gfx_v11_0_early_init(void *handle) return gfx_v11_0_init_microcode(adev); } -static int gfx_v11_0_late_init(void *handle) +static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6639,9 +6698,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return amdgpu_ring_test_ring(ring); } -static void gfx_v11_ip_print(void *handle, struct drm_printer *p) +static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6703,9 +6762,9 @@ static void gfx_v11_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v11_ip_dump(void *handle) +static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); @@ -6769,6 +6828,13 @@ static void gfx_v11_ip_dump(void *handle) amdgpu_gfx_off_ctrl(adev, true); } +static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) +{ + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { .name = "gfx_v11_0", .early_init = gfx_v11_0_early_init, @@ -6818,7 +6884,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 22 + /* SET_Q_PREEMPTION_MODE */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v11_0_emit_mem_sync */ + 8 + /* gfx_v11_0_emit_mem_sync */ + 2, /* gfx_v11_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ .emit_ib = gfx_v11_0_ring_emit_ib_gfx, .emit_fence = gfx_v11_0_ring_emit_fence, @@ -6841,6 +6908,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .soft_recovery = gfx_v11_0_ring_soft_recovery, 
.emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kgq, + .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { @@ -6861,7 +6931,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v11_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v11_0_emit_mem_sync */ + 8 + /* gfx_v11_0_emit_mem_sync */ + 2, /* gfx_v11_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ .emit_ib = gfx_v11_0_ring_emit_ib_compute, .emit_fence = gfx_v11_0_ring_emit_fence, @@ -6879,6 +6950,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kcq, + .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm new file mode 100644 index 000000000000..9b90b66368c7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +// This shader is to clean LDS, SGPRs and VGPRs. It is first 64 Dwords or 256 bytes of 192 Dwords cleaner shader. 
+//To turn this shader program on for compilation change this to main and lower shader main to main_1 + +// Navi3 : Clear SGPRs, VGPRs and LDS +// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot +// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD +// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS) +// It takes 2 workgroups to use all of LDS: one on each CU of the WGP +// Each wave clears SGPRs 0 - 107 +// Each wave clears VGPRs 0 - 63 +// The first wave of the workgroup clears its 64KB of LDS +// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup +// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared. + +shader main + asic(GFX11) + type(CS) + wave_size(32) +// Note: original source code from SQ team + +// Takes about 2500 clocks to run. +// (theoretical fastest = 1024clks vgpr + 640lds = 1660 clks) +// + S_BARRIER + + // + // CLEAR VGPRs + // + s_mov_b32 m0, 0x00000058 // Loop 96/8=12 times (loop unrolled for performance) + +label_0005: + v_movreld_b32 v0, 0 + v_movreld_b32 v1, 0 + v_movreld_b32 v2, 0 + v_movreld_b32 v3, 0 + v_movreld_b32 v4, 0 + v_movreld_b32 v5, 0 + v_movreld_b32 v6, 0 + v_movreld_b32 v7, 0 + s_sub_u32 m0, m0, 8 + s_cbranch_scc0 label_0005 + // + // + + s_mov_b32 s2, 0x80000000 // Bit31 is first_wave + s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set + s_cbranch_scc0 label_0023 // Clean LDS if it's first wave of ThreadGroup/WorkGroup + // CLEAR LDS + // + s_mov_b32 exec_lo, 0xffffffff + s_mov_b32 exec_hi, 0xffffffff + v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63) + v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63) + v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte) + s_mov_b32 s2, 0x00000003f // 64 loop iterations + s_mov_b32 m0, 0xffffffff + // Clear all of LDS space + // Each FirstWave of WorkGroup clears 64kbyte block + +label_001F: + ds_write2_b64 v1, v[2:3], v[2:3] offset1:32 + ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96 + v_add_co_u32 v1, vcc, 0x00000400, v1 + s_sub_u32 s2, s2, 1 + s_cbranch_scc0 label_001F + // + // CLEAR SGPRs + // +label_0023: + s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance) +label_sgpr_loop: + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop + + //clear vcc + s_mov_b64 vcc, 0 //clear vcc + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1 + s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3 + s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5 + s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7 + s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9 + s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11 + s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13 + s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15 + + s_endpgm + +end + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h new file mode 100644 index 000000000000..3218cc04f543 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* Define the cleaner shader gfx_11_0_3 */ +static const u32 gfx_11_0_3_cleaner_shader_hex[] = { + 0xb0804006, 0xbe8200ff, + 0x00000058, 0xbefd0080, + 0x7e008480, 0x7e028480, + 0x7e048480, 0x7e068480, + 0x7e088480, 0x7e0a8480, + 0x7e0c8480, 0x7e0e8480, + 0xbefd0002, 0x80828802, + 0xbfa1fff5, 0xbe8200ff, + 0x80000000, 0x8b020002, + 0xbfa10012, 0xbefe00c1, + 0xbeff00c1, 0xd71f0001, + 0x0001007f, 0xd7200001, + 0x0002027e, 0x16020288, + 0xbe8200bf, 0xbefd00c1, + 0xd9382000, 0x00020201, + 0xd9386040, 0x00040401, + 0xd7006a01, 0x000202ff, + 0x00000400, 0x80828102, + 0xbfa1fff7, 0xbefd00ff, + 0x00000068, 0xbe804280, + 0xbe814280, 0xbe824280, + 0xbe834280, 0x80fd847d, + 0xbfa1fffa, 0xbeea0180, + 0xbeec0180, 0xbeee0180, + 0xbef00180, 0xbef20180, + 0xbef40180, 0xbef60180, + 0xbef80180, 0xbefa0180, + 0xbfb00000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0xbf9f0000, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 47b47d21f464..fe7c48f2fb2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1319,12 +1319,12 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v12_0_sw_init(void *handle) +static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id = 0; unsigned num_compute_rings; int xcc_id = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): @@ -1346,6 +1346,12 @@ static int gfx_v12_0_sw_init(void *handle) break; } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + default: + adev->gfx.enable_cleaner_shader = false; + break; + } + /* recalculate compute rings to use based on hardware configuration */ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe) / 2; @@ -1431,6 +1437,12 @@ static int gfx_v12_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!adev->enable_mes_kiq) { r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0); if (r) { @@ -1460,6 +1472,10 @@ static int gfx_v12_0_sw_init(void *handle) gfx_v12_0_alloc_ip_dump(adev); + r = 
amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } @@ -1492,10 +1508,10 @@ static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.rlc_autoload_ptr); } -static int gfx_v12_0_sw_fini(void *handle) +static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -1519,6 +1535,8 @@ static int gfx_v12_0_sw_fini(void *handle) gfx_v12_0_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); + kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); kfree(adev->gfx.ip_dump_gfx_queues); @@ -2601,7 +2619,7 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & @@ -3513,10 +3531,10 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) } } -static int gfx_v12_0_hw_init(void *handle) +static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { @@ -3603,9 +3621,9 @@ static int gfx_v12_0_hw_init(void *handle) return r; } -static int gfx_v12_0_hw_fini(void *handle) +static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -3643,14 +3661,14 @@ static int gfx_v12_0_hw_fini(void *handle) return 0; } -static int gfx_v12_0_suspend(void *handle) +static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_fini(handle); + return gfx_v12_0_hw_fini(ip_block); } -static int gfx_v12_0_resume(void *handle) +static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v12_0_hw_init(handle); + return gfx_v12_0_hw_init(ip_block); } static bool gfx_v12_0_is_idle(void *handle) @@ -3664,11 +3682,11 @@ static bool gfx_v12_0_is_idle(void *handle) return true; } -static int gfx_v12_0_wait_for_idle(void *handle) +static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -3695,9 +3713,9 @@ static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) return clock; } -static int gfx_v12_0_early_init(void *handle) +static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v12_0_gfx_funcs; @@ -3717,9 +3735,9 @@ static int gfx_v12_0_early_init(void *handle) return gfx_v12_0_init_microcode(adev); } -static int gfx_v12_0_late_init(void *handle) +static int gfx_v12_0_late_init(struct amdgpu_ip_block 
*ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -5022,8 +5040,6 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -5034,13 +5050,19 @@ static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v12_ip_print(void *handle, struct drm_printer *p) +static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* Emit the cleaner shader */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ +} + +static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) +{ + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5102,9 +5124,9 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v12_ip_dump(void *handle) +static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); @@ -5297,7 +5319,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ .emit_ib = gfx_v12_0_ring_emit_ib_gfx, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5318,6 +5341,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { @@ -5336,7 +5362,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v12_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ - 8, /* gfx_v12_0_emit_mem_sync */ + 8 + /* gfx_v12_0_emit_mem_sync */ + 2, /* gfx_v12_0_ring_emit_cleaner_shader */ .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ .emit_ib = gfx_v12_0_ring_emit_ib_compute, .emit_fence = gfx_v12_0_ring_emit_fence, @@ -5353,6 +5380,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, + .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, + .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, + .end_use = 
amdgpu_gfx_enforce_isolation_ring_end_use, }; static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 564f0b9336b6..41f50bf380c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3023,9 +3023,9 @@ static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { .start = gfx_v6_0_rlc_start }; -static int gfx_v6_0_early_init(void *handle) +static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; @@ -3039,10 +3039,10 @@ static int gfx_v6_0_early_init(void *handle) return 0; } -static int gfx_v6_0_sw_init(void *handle) +static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); @@ -3107,10 +3107,10 @@ static int gfx_v6_0_sw_init(void *handle) return r; } -static int gfx_v6_0_sw_fini(void *handle) +static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -3122,10 +3122,10 @@ static int gfx_v6_0_sw_fini(void *handle) return 0; } -static int gfx_v6_0_hw_init(void *handle) +static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_constants_init(adev); @@ -3142,9 +3142,9 @@ static int gfx_v6_0_hw_init(void *handle) return r; } -static int gfx_v6_0_hw_fini(void *handle) +static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v6_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); @@ -3153,18 +3153,14 @@ static int gfx_v6_0_hw_fini(void *handle) return 0; } -static int gfx_v6_0_suspend(void *handle) +static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_fini(adev); + return gfx_v6_0_hw_fini(ip_block); } -static int gfx_v6_0_resume(void *handle) +static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v6_0_hw_init(adev); + return gfx_v6_0_hw_init(ip_block); } static bool gfx_v6_0_is_idle(void *handle) @@ -3177,24 +3173,19 @@ static bool gfx_v6_0_is_idle(void *handle) return true; } -static int gfx_v6_0_wait_for_idle(void *handle) +static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v6_0_is_idle(handle)) + if (gfx_v6_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v6_0_soft_reset(void *handle) -{ - return 0; -} - static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -3444,7 +3435,6 @@ static void 
gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .name = "gfx_v6_0", .early_init = gfx_v6_0_early_init, - .late_init = NULL, .sw_init = gfx_v6_0_sw_init, .sw_fini = gfx_v6_0_sw_fini, .hw_init = gfx_v6_0_hw_init, @@ -3453,11 +3443,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .resume = gfx_v6_0_resume, .is_idle = gfx_v6_0_is_idle, .wait_for_idle = gfx_v6_0_wait_for_idle, - .soft_reset = gfx_v6_0_soft_reset, .set_clockgating_state = gfx_v6_0_set_clockgating_state, .set_powergating_state = gfx_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f146806c4633..824d5913103b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2559,7 +2559,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); @@ -2876,7 +2876,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ wb_gpu_addr = ring->rptr_gpu_addr; mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_rptr_report_addr_hi = @@ -4134,9 +4134,9 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .update_spm_vmid = gfx_v7_0_update_spm_vmid }; -static int gfx_v7_0_early_init(void *handle) +static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; @@ -4151,9 +4151,9 @@ static int gfx_v7_0_early_init(void *handle) return 0; } -static int gfx_v7_0_late_init(void *handle) +static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4343,10 +4343,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, return 0; } -static int gfx_v7_0_sw_init(void *handle) +static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j, k, r, ring_id; switch (adev->asic_type) { @@ -4439,9 +4439,9 @@ static int gfx_v7_0_sw_init(void *handle) return r; } -static int gfx_v7_0_sw_fini(void *handle) +static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4465,10 +4465,10 @@ static int gfx_v7_0_sw_fini(void *handle) return 0; } -static int gfx_v7_0_hw_init(void *handle) +static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gfx_v7_0_constants_init(adev); @@ -4486,9 +4486,9 @@ static int gfx_v7_0_hw_init(void *handle) return r; } -static int gfx_v7_0_hw_fini(void *handle) +static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -4499,18 +4499,14 @@ static int gfx_v7_0_hw_fini(void *handle) return 0; } -static int gfx_v7_0_suspend(void *handle) +static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_fini(adev); + return gfx_v7_0_hw_fini(ip_block); } -static int gfx_v7_0_resume(void *handle) +static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gfx_v7_0_hw_init(adev); + return gfx_v7_0_hw_init(ip_block); } static bool gfx_v7_0_is_idle(void *handle) @@ -4523,11 +4519,11 @@ static bool gfx_v7_0_is_idle(void *handle) return true; } -static int gfx_v7_0_wait_for_idle(void *handle) +static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4540,11 +4536,11 @@ static int gfx_v7_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int gfx_v7_0_soft_reset(void *handle) +static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5009,8 +5005,6 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .soft_reset = gfx_v7_0_soft_reset, .set_clockgating_state = gfx_v7_0_set_clockgating_state, .set_powergating_state = gfx_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bc8295812cc8..b7006c41e270 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1894,12 +1894,12 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); -static int gfx_v8_0_sw_init(void *handle) +static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (adev->asic_type) { case CHIP_TONGA: @@ -2037,9 +2037,9 @@ static int gfx_v8_0_sw_init(void *handle) return 0; } -static int gfx_v8_0_sw_fini(void *handle) +static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->gfx.num_gfx_rings; i++) @@ -4260,7 +4260,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled 
or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -4783,10 +4783,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
	gfx_v8_0_cp_compute_enable(adev, enable);
}

-static int gfx_v8_0_hw_init(void *handle)
+static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;

	gfx_v8_0_init_golden_registers(adev);
	gfx_v8_0_constants_init(adev);
@@ -4823,6 +4823,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
+	/* Submit unmap queue packet */
+	amdgpu_ring_commit(kiq_ring);
+	/*
+	 * Ring test will do a basic scratch register change check. Just run
+	 * this to ensure that the unmap queues submitted above have been
+	 * processed successfully before returning.
+	 */
	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ disable failed\n");
@@ -4865,13 +4872,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle)
	return -ETIMEDOUT;
}

-static int gfx_v8_0_wait_for_idle(void *handle)
+static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
-		if (gfx_v8_0_is_idle(handle))
+		if (gfx_v8_0_is_idle(adev))
			return 0;

		udelay(1);
@@ -4879,9 +4886,9 @@ static int gfx_v8_0_wait_for_idle(void *handle)
	return -ETIMEDOUT;
}

-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4897,8 +4904,9 @@ static int gfx_v8_0_hw_fini(void *handle)
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}
+
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-	if (!gfx_v8_0_wait_for_idle(adev))
+	if (!gfx_v8_0_wait_for_idle(ip_block))
		gfx_v8_0_cp_enable(adev, false);
	else
		pr_err("cp is busy, skip halt cp\n");
@@ -4911,19 +4919,19 @@
-static int gfx_v8_0_suspend(void *handle)
+static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
-	return gfx_v8_0_hw_fini(handle);
+	return gfx_v8_0_hw_fini(ip_block);
}

-static int gfx_v8_0_resume(void *handle)
+static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
-	return gfx_v8_0_hw_init(handle);
+	return gfx_v8_0_hw_init(ip_block);
}

-static bool gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;
@@ -4983,9 +4991,9 @@
	}
}

-static int gfx_v8_0_pre_soft_reset(void *handle)
+static int gfx_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
@@ -5024,9 +5032,9 @@
	return 0;
}

-static int
gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; @@ -5086,9 +5094,9 @@ static int gfx_v8_0_soft_reset(void *handle) return 0; } -static int gfx_v8_0_post_soft_reset(void *handle) +static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset = 0; if ((!adev->gfx.grbm_soft_reset) && @@ -5254,9 +5262,9 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q }; -static int gfx_v8_0_early_init(void *handle) +static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.xcc_mask = 1; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; @@ -5271,9 +5279,9 @@ static int gfx_v8_0_early_init(void *handle) return 0; } -static int gfx_v8_0_late_init(void *handle) +static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -6947,8 +6955,6 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, .get_clockgating_state = gfx_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 23f0573ae47b..0b6f09f2cc9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2198,12 +2198,12 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_0_sw_init(void *handle) +static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned int hw_prio; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { @@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle) } switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 2): + adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); + if (adev->gfx.mec_fw_version >= 88) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; default: adev->gfx.enable_cleaner_shader = false; break; @@ -2362,6 +2374,12 @@ static int gfx_v9_0_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); @@ -2390,7 +2408,7 @@ static int 
gfx_v9_0_sw_init(void *handle) gfx_v9_0_alloc_ip_dump(adev); - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; @@ -2398,10 +2416,10 @@ static int gfx_v9_0_sw_init(void *handle) } -static int gfx_v9_0_sw_fini(void *handle) +static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) { for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) @@ -2418,6 +2436,8 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); amdgpu_gfx_kiq_fini(adev, 0); + amdgpu_gfx_cleaner_shader_sw_fini(adev); + gfx_v9_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -2429,7 +2449,7 @@ static int gfx_v9_0_sw_fini(void *handle) } gfx_v9_0_free_microcode(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -3184,6 +3204,15 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); @@ -3265,8 +3294,8 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) * confirmed that the APU gfx10/gfx11 needn't such update. 
*/ if (adev->flags & AMD_IS_APU && - adev->in_s3 && !adev->suspend_complete) { - DRM_INFO(" Will skip the CSB packet resubmit\n"); + adev->in_s3 && !pm_resume_via_firmware()) { + DRM_INFO("Will skip the CSB packet resubmit\n"); return 0; } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); @@ -3346,7 +3375,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = ring->rptr_gpu_addr; WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); @@ -3393,7 +3422,15 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, - (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[0].ring.sched.ready = false; } udelay(50); @@ -3914,6 +3951,10 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; } + if (adev->gfx.num_gfx_rings) + gfx_v9_0_cp_gfx_enable(adev, false); + gfx_v9_0_cp_compute_enable(adev, false); + r = gfx_v9_0_kiq_resume(adev); if (r) return r; @@ -3970,10 +4011,10 @@ static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) gfx_v9_0_cp_compute_enable(adev, enable); } -static int gfx_v9_0_hw_init(void *handle) +static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -3999,9 +4040,9 @@ static int gfx_v9_0_hw_init(void *handle) return r; } -static int gfx_v9_0_hw_fini(void *handle) +static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); @@ -4051,14 +4092,14 @@ static int gfx_v9_0_hw_fini(void *handle) return 0; } -static int gfx_v9_0_suspend(void *handle) +static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_fini(handle); + return gfx_v9_0_hw_fini(ip_block); } -static int gfx_v9_0_resume(void *handle) +static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_0_hw_init(handle); + return gfx_v9_0_hw_init(ip_block); } static bool gfx_v9_0_is_idle(void *handle) @@ -4072,24 +4113,24 @@ static bool gfx_v9_0_is_idle(void *handle) return true; } -static int gfx_v9_0_wait_for_idle(void *handle) +static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_0_is_idle(handle)) + if 
(gfx_v9_0_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_0_soft_reset(void *handle) +static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); @@ -4745,9 +4786,9 @@ fail: return r; } -static int gfx_v9_0_early_init(void *handle) +static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.funcs = &gfx_v9_0_gfx_funcs; @@ -4771,9 +4812,9 @@ static int gfx_v9_0_early_init(void *handle) return gfx_v9_0_init_microcode(adev); } -static int gfx_v9_0_ecc_late_init(void *handle) +static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* @@ -4805,9 +4846,9 @@ static int gfx_v9_0_ecc_late_init(void *handle) return 0; } -static int gfx_v9_0_late_init(void *handle) +static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -4822,7 +4863,7 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - r = gfx_v9_0_ecc_late_init(handle); + r = gfx_v9_0_ecc_late_init(ip_block); if (r) return r; @@ -7167,8 +7208,6 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -7179,8 +7218,7 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) @@ -7237,10 +7275,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (!adev->debug_exp_resets && - !adev->gfx.num_gfx_rings) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -7316,9 +7350,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, return amdgpu_ring_test_ring(ring); } -static void gfx_v9_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); @@ -7356,9 +7390,9 @@ static void gfx_v9_ip_print(void *handle, struct drm_printer *p) } -static void gfx_v9_ip_dump(void *handle) +static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k, reg, index = 0; uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h index 36c0292b5110..0b6bd09b7529 100644 
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,3 +24,45 @@ static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
	/* Add the cleaner shader code here */
};
+
+/* Define the cleaner shader gfx_9_4_2 */
+static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
+	0xbf068100, 0xbf84003b,
+	0xbf8a0000, 0xb07c0000,
+	0xbe8200ff, 0x00000078,
+	0xbf110802, 0x7e000280,
+	0x7e020280, 0x7e040280,
+	0x7e060280, 0x7e080280,
+	0x7e0a0280, 0x7e0c0280,
+	0x7e0e0280, 0x80828802,
+	0xbe803202, 0xbf84fff5,
+	0xbf9c0000, 0xbe8200ff,
+	0x80000000, 0x86020102,
+	0xbf840011, 0xbefe00c1,
+	0xbeff00c1, 0xd28c0001,
+	0x0001007f, 0xd28d0001,
+	0x0002027e, 0x10020288,
+	0xbe8200bf, 0xbefc00c1,
+	0xd89c2000, 0x00020201,
+	0xd89c6040, 0x00040401,
+	0x320202ff, 0x00000400,
+	0x80828102, 0xbf84fff8,
+	0xbefc00ff, 0x0000005c,
+	0xbf800000, 0xbe802c80,
+	0xbe812c80, 0xbe822c80,
+	0xbe832c80, 0x80fc847c,
+	0xbf84fffa, 0xbee60080,
+	0xbee70080, 0xbeea0180,
+	0xbeec0180, 0xbeee0180,
+	0xbef00180, 0xbef20180,
+	0xbef40180, 0xbef60180,
+	0xbef80180, 0xbefa0180,
+	0xbf810000, 0xbf8d0001,
+	0xbefc00ff, 0x0000005c,
+	0xbf800000, 0xbe802c80,
+	0xbe812c80, 0xbe822c80,
+	0xbe832c80, 0x80fc847c,
+	0xbf84fffa, 0xbee60080,
+	0xbee70080, 0xbeea01ff,
+	0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
new file mode 100644
index 000000000000..35b8cf9070bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 Dwords (256 bytes) of the 192-Dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and rename the shader main below to main_1.
+
+// MI200 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+//    Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+//    Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+//    Waves in the workgroup share the 64KB of LDS
+//    Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+//    Each wave clears 128 VGPRs, so all 512 in the SIMD
+//    The first wave of the workgroup clears its 64KB of LDS
+//    The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+//    before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+//    Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+//    Waves are allocated 96 SGPRs each
+//    CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+//    As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+//    Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+//    Instead, the shader starts with an S_SETHALT 1. Once all waves are launched, CP will send the unhalt command
+//    The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
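Before the MI200 entry point below, it is worth seeing how the driver consumes this shader; the flow is condensed here from the gfx_v9_0 sw_init/hw_init/sw_fini hunks earlier in this patch (error paths abbreviated and the surrounding switch statement omitted):

	/* sw_init: opt in only when the MEC firmware can run the shader */
	adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
	adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
	if (adev->gfx.mec_fw_version >= 88) {
		adev->gfx.enable_cleaner_shader = true;
		if (amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size)) {
			adev->gfx.enable_cleaner_shader = false;
			dev_err(adev->dev, "Failed to initialize cleaner shader\n");
		}
	}

	/* hw_init: copy the encoded Dwords into the reserved GPU buffer */
	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
				       adev->gfx.cleaner_shader_ptr);

	/* sw_fini: release the buffer again */
	amdgpu_gfx_cleaner_shader_sw_fini(adev);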
+
+shader main
+  asic(MI200)
+  type(CS)
+  wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+  s_cmp_eq_u32 s0, 1                   // Bit0 of sgpr0 set means FW set COMPUTE_USER_DATA_3, so clear VGPRs and LDS
+  s_cbranch_scc0  label_0023           // Clean VGPRs and LDS if sgpr0 of the wave is set, scc = (s0 == 1)
+  S_BARRIER
+
+  s_movk_i32    m0, 0x0000
+  s_mov_b32     s2, 0x00000078         // Loop 128/8=16 times  (loop unrolled for performance)
+  //
+  // CLEAR VGPRs
+  //
+  s_set_gpr_idx_on  s2, 0x8            // enable Dest VGPR indexing
+label_0005:
+  v_mov_b32     v0, 0
+  v_mov_b32     v1, 0
+  v_mov_b32     v2, 0
+  v_mov_b32     v3, 0
+  v_mov_b32     v4, 0
+  v_mov_b32     v5, 0
+  v_mov_b32     v6, 0
+  v_mov_b32     v7, 0
+  s_sub_u32         s2, s2, 8
+  s_set_gpr_idx_idx s2
+  s_cbranch_scc0    label_0005
+  s_set_gpr_idx_off
+
+  //
+  //
+
+  s_mov_b32     s2, 0x80000000         // Bit31 is first_wave
+  s_and_b32     s2, s2, s1             // sgpr1 has the tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+  s_cbranch_scc0  label_clean_sgpr_1   // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+  // CLEAR LDS
+  //
+  s_mov_b32 exec_lo, 0xffffffff
+  s_mov_b32 exec_hi, 0xffffffff
+  v_mbcnt_lo_u32_b32  v1, exec_hi, 0   // Set V1 to thread-ID (0..63)
+  v_mbcnt_hi_u32_b32  v1, exec_lo, v1  // Set V1 to thread-ID (0..63)
+  v_mul_u32_u24  v1, 0x00000008, v1    // * 8, so each thread is a double-dword address (8byte)
+  s_mov_b32     s2, 0x0000003f         // 64 loop iterations
+  s_mov_b32     m0, 0xffffffff
+  // Clear all of LDS space
+  // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+  ds_write2_b64  v1, v[2:3], v[2:3]  offset1:32
+  ds_write2_b64  v1, v[4:5], v[4:5]  offset0:64 offset1:96
+  v_add_co_u32   v1, vcc, 0x00000400, v1
+  s_sub_u32      s2, s2, 1
+  s_cbranch_scc0 label_001F
+
+  //
+  // CLEAR SGPRs
+  //
+label_clean_sgpr_1:
+  s_mov_b32     m0, 0x0000005c         // Loop 96/4=24 times  (loop unrolled for performance)
+  s_nop 0
+label_sgpr_loop:
+  s_movreld_b32   s0, 0
+  s_movreld_b32   s1, 0
+  s_movreld_b32   s2, 0
+  s_movreld_b32   s3, 0
+  s_sub_u32       m0, m0, 4
+  s_cbranch_scc0  label_sgpr_loop
+
+  // clear vcc, flat scratch
+  s_mov_b32 flat_scratch_lo, 0         // clear flat scratch lo SGPR
+  s_mov_b32 flat_scratch_hi, 0         // clear flat scratch hi SGPR
+  s_mov_b64 vcc, 0                     // clear vcc
+  s_mov_b64 ttmp0, 0
//Clear ttmp0 and ttmp1 + s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3 + s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5 + s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7 + s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9 + s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11 + s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13 + s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15 +s_endpgm + +label_0023: + + s_sethalt 1 + + s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance) + s_nop 0 +label_sgpr_loop1: + + s_movreld_b32 s0, 0 + s_movreld_b32 s1, 0 + s_movreld_b32 s2, 0 + s_movreld_b32 s3, 0 + s_sub_u32 m0, m0, 4 + s_cbranch_scc0 label_sgpr_loop1 + + //clear vcc, flat scratch + s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR + s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR + s_mov_b64 vcc, 0xee //clear vcc + +s_endpgm +end + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c100845409f7..e2b3dda57030 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1049,10 +1049,10 @@ static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev) } } -static int gfx_v9_4_3_sw_init(void *handle) +static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) { int i, j, k, r, ring_id, xcc_id, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): @@ -1157,6 +1157,19 @@ static int gfx_v9_4_3_sw_init(void *handle) return r; } + adev->gfx.compute_supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + if (adev->gfx.mec_fw_version >= 155) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; + } + break; + default: + break; + } r = gfx_v9_4_3_gpu_early_init(adev); if (r) return r; @@ -1165,26 +1178,19 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; - - if (!amdgpu_sriov_vf(adev)) { - r = amdgpu_gfx_sysfs_init(adev); - if (r) - return r; - } - - gfx_v9_4_3_alloc_ip_dump(adev); - - r = amdgpu_gfx_sysfs_isolation_shader_init(adev); + r = amdgpu_gfx_sysfs_init(adev); if (r) return r; + gfx_v9_4_3_alloc_ip_dump(adev); + return 0; } -static int gfx_v9_4_3_sw_fini(void *handle) +static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block) { int i, num_xcc; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) @@ -1201,9 +1207,7 @@ static int gfx_v9_4_3_sw_fini(void *handle) gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); - if (!amdgpu_sriov_vf(adev)) - amdgpu_gfx_sysfs_fini(adev); - amdgpu_gfx_sysfs_isolation_shader_fini(adev); + amdgpu_gfx_sysfs_fini(adev); kfree(adev->gfx.ip_dump_core); kfree(adev->gfx.ip_dump_compute_queues); @@ -1247,8 +1251,10 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. 
These should be enabled by FW for target VMIDs. + */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0); @@ -2343,10 +2349,10 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } -static int gfx_v9_4_3_hw_init(void *handle) +static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, adev->gfx.cleaner_shader_ptr); @@ -2367,9 +2373,9 @@ static int gfx_v9_4_3_hw_init(void *handle) return r; } -static int gfx_v9_4_3_hw_fini(void *handle) +static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); @@ -2384,14 +2390,14 @@ static int gfx_v9_4_3_hw_fini(void *handle) return 0; } -static int gfx_v9_4_3_suspend(void *handle) +static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_fini(handle); + return gfx_v9_4_3_hw_fini(ip_block); } -static int gfx_v9_4_3_resume(void *handle) +static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block) { - return gfx_v9_4_3_hw_init(handle); + return gfx_v9_4_3_hw_init(ip_block); } static bool gfx_v9_4_3_is_idle(void *handle) @@ -2408,24 +2414,24 @@ static bool gfx_v9_4_3_is_idle(void *handle) return true; } -static int gfx_v9_4_3_wait_for_idle(void *handle) +static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v9_4_3_is_idle(handle)) + if (gfx_v9_4_3_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int gfx_v9_4_3_soft_reset(void *handle) +static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block) { u32 grbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* GRBM_STATUS */ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); @@ -2509,9 +2515,9 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v9_4_3_early_init(void *handle) +static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); @@ -2527,9 +2533,9 @@ static int gfx_v9_4_3_early_init(void *handle) return gfx_v9_4_3_init_microcode(adev); } -static int gfx_v9_4_3_late_init(void *handle) +static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); @@ -3056,9 +3062,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint32_t value = 0; - if (!adev->debug_exp_resets) - return; - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); value = 
REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); @@ -3574,9 +3577,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int r; - if (!adev->debug_exp_resets) - return -EINVAL; - if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -4567,8 +4567,6 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) { - int i; - /* Header itself is a NOP packet */ if (num_nop == 1) { amdgpu_ring_write(ring, ring->funcs->nop); @@ -4579,13 +4577,12 @@ static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_no amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); /* Header is at index 0, followed by num_nops - 1 NOP packet's */ - for (i = 1; i < num_nop; i++) - amdgpu_ring_write(ring, ring->funcs->nop); + amdgpu_ring_insert_nop(ring, num_nop - 1); } -static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) +static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t xcc_id, xcc_offset, inst_offset; uint32_t num_xcc, reg, num_inst; @@ -4643,9 +4640,9 @@ static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p) } } -static void gfx_v9_4_3_ip_dump(void *handle) +static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t i, j, k; uint32_t num_xcc, reg, num_inst; uint32_t xcc_id, xcc_offset, inst_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 9784a2892185..697599c46240 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -175,7 +175,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); @@ -630,9 +633,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) } -static int gmc_v10_0_early_init(void *handle) +static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_set_mmhub_funcs(adev); gmc_v10_0_set_gfxhub_funcs(adev); @@ -651,9 +654,9 @@ static int gmc_v10_0_early_init(void *handle) return 0; } -static int gmc_v10_0_late_init(void *handle) +static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -769,10 +772,10 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v10_0_sw_init(void *handle) +static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->gfxhub.funcs->init(adev); @@ -920,9 +923,9 @@ static void 
gmc_v10_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v10_0_sw_fini(void *handle) +static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v10_0_gart_fini(adev); @@ -985,9 +988,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v10_0_hw_init(void *handle) +static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -1032,9 +1035,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v10_0_hw_fini(void *handle) +static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v10_0_gart_disable(adev); @@ -1053,25 +1056,22 @@ static int gmc_v10_0_hw_fini(void *handle) return 0; } -static int gmc_v10_0_suspend(void *handle) +static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v10_0_hw_fini(adev); + gmc_v10_0_hw_fini(ip_block); return 0; } -static int gmc_v10_0_resume(void *handle) +static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v10_0_hw_init(adev); + r = gmc_v10_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1082,17 +1082,12 @@ static bool gmc_v10_0_is_idle(void *handle) return true; } -static int gmc_v10_0_wait_for_idle(void *handle) +static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v10.*/ return 0; } -static int gmc_v10_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v10_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1154,7 +1149,6 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = { .resume = gmc_v10_0_resume, .is_idle = gmc_v10_0_is_idle, .wait_for_idle = gmc_v10_0_wait_for_idle, - .soft_reset = gmc_v10_0_soft_reset, .set_clockgating_state = gmc_v10_0_set_clockgating_state, .set_powergating_state = gmc_v10_0_set_powergating_state, .get_clockgating_state = gmc_v10_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2797fd84432b..f893ab4c14df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -144,7 +144,10 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -601,9 +604,9 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v11_0_early_init(void *handle) +static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; 
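The gmc conversions here and below repeat the same mechanical pattern already applied to the gfx blocks above: each amd_ip_funcs callback trades its untyped void *handle for the struct amdgpu_ip_block it serves. As a schematic before/after (the foo_* names are illustrative, not taken from this patch):

/* before: an opaque handle that every callback had to cast */
static int foo_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return foo_setup(adev);
}

/* after: the IP block itself, with adev reachable through it */
static int foo_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return foo_setup(adev);
}

The unchecked cast disappears, suspend/resume wrappers can forward ip_block straight to hw_fini/hw_init, and helpers such as the is_idle() callbacks are now handed adev explicitly instead of the recycled handle.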
gmc_v11_0_set_gfxhub_funcs(adev); gmc_v11_0_set_mmhub_funcs(adev); @@ -622,9 +625,9 @@ static int gmc_v11_0_early_init(void *handle) return 0; } -static int gmc_v11_0_late_init(void *handle) +static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -729,10 +732,10 @@ static int gmc_v11_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v11_0_sw_init(void *handle) +static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -849,9 +852,9 @@ static void gmc_v11_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v11_0_sw_fini(void *handle) +static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v11_0_gart_fini(adev); @@ -908,9 +911,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v11_0_hw_init(void *handle) +static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; @@ -940,9 +943,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v11_0_hw_fini(void *handle) +static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -961,25 +964,22 @@ static int gmc_v11_0_hw_fini(void *handle) return 0; } -static int gmc_v11_0_suspend(void *handle) +static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v11_0_hw_fini(adev); + gmc_v11_0_hw_fini(ip_block); return 0; } -static int gmc_v11_0_resume(void *handle) +static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v11_0_hw_init(adev); + r = gmc_v11_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -990,17 +990,12 @@ static bool gmc_v11_0_is_idle(void *handle) return true; } -static int gmc_v11_0_wait_for_idle(void *handle) +static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v11_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v11_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1041,7 +1036,6 @@ const struct amd_ip_funcs gmc_v11_0_ip_funcs = { .resume = gmc_v11_0_resume, .is_idle = gmc_v11_0_is_idle, .wait_for_idle = gmc_v11_0_wait_for_idle, - .soft_reset = gmc_v11_0_soft_reset, .set_clockgating_state = gmc_v11_0_set_clockgating_state, .set_powergating_state = gmc_v11_0_set_powergating_state, .get_clockgating_state = gmc_v11_0_get_clockgating_state, diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index edcb5351f8cc..d22b027fd0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -137,7 +137,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (status != 0) hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); } @@ -604,9 +607,9 @@ static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev) } } -static int gmc_v12_0_early_init(void *handle) +static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v12_0_set_gfxhub_funcs(adev); gmc_v12_0_set_mmhub_funcs(adev); @@ -624,9 +627,9 @@ static int gmc_v12_0_early_init(void *handle) return 0; } -static int gmc_v12_0_late_init(void *handle) +static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -731,10 +734,10 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static int gmc_v12_0_sw_init(void *handle) +static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->mmhub.funcs->init(adev); @@ -841,9 +844,9 @@ static void gmc_v12_0_gart_fini(struct amdgpu_device *adev) amdgpu_gart_table_vram_free(adev); } -static int gmc_v12_0_sw_fini(void *handle) +static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_vm_manager_fini(adev); gmc_v12_0_gart_fini(adev); @@ -894,10 +897,10 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v12_0_hw_init(void *handle) +static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* The sequence of these two function calls matters.*/ gmc_v12_0_init_golden_registers(adev); @@ -924,9 +927,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v12_0_hw_fini(void *handle) +static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ @@ -945,25 +948,22 @@ static int gmc_v12_0_hw_fini(void *handle) return 0; } -static int gmc_v12_0_suspend(void *handle) +static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v12_0_hw_fini(adev); + gmc_v12_0_hw_fini(ip_block); return 0; } -static int gmc_v12_0_resume(void *handle) +static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = 
gmc_v12_0_hw_init(adev); + r = gmc_v12_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -974,17 +974,12 @@ static bool gmc_v12_0_is_idle(void *handle) return true; } -static int gmc_v12_0_wait_for_idle(void *handle) +static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v11.*/ return 0; } -static int gmc_v12_0_soft_reset(void *handle) -{ - return 0; -} - static int gmc_v12_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1025,7 +1020,6 @@ const struct amd_ip_funcs gmc_v12_0_ip_funcs = { .resume = gmc_v12_0_resume, .is_idle = gmc_v12_0_is_idle, .wait_for_idle = gmc_v12_0_wait_for_idle, - .soft_reset = gmc_v12_0_soft_reset, .set_clockgating_state = gmc_v12_0_set_clockgating_state, .set_powergating_state = gmc_v12_0_set_powergating_state, .get_clockgating_state = gmc_v12_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d36725666b54..ca000b3d1afc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -43,7 +43,7 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v6_0_wait_for_idle(void *handle); +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); @@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin"); static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v6_0_wait_for_idle((void *)adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v6_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -213,6 +218,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { int i, j; + struct amdgpu_ip_block *ip_block; + /* Initialize HDP */ for (i = 0, j = 0; i < 32; i++, j += 0x6) { @@ -224,7 +231,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v6_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); if (adev->mode_info.num_crtc) { @@ -251,7 +262,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v6_0_wait_for_idle((void *)adev)) + if (gmc_v6_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -762,9 +773,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v6_0_early_init(void *handle) +static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_set_gmc_funcs(adev); gmc_v6_0_set_irq_funcs(adev); @@ -772,9 +783,9 @@ static int gmc_v6_0_early_init(void *handle) return 0; } -static int gmc_v6_0_late_init(void *handle) +static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block) { - 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -799,10 +810,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v6_0_sw_init(void *handle) +static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -876,9 +887,9 @@ static int gmc_v6_0_sw_init(void *handle) return 0; } -static int gmc_v6_0_sw_fini(void *handle) +static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -889,10 +900,10 @@ static int gmc_v6_0_sw_fini(void *handle) return 0; } -static int gmc_v6_0_hw_init(void *handle) +static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v6_0_mc_program(adev); @@ -914,9 +925,9 @@ static int gmc_v6_0_hw_init(void *handle) return 0; } -static int gmc_v6_0_hw_fini(void *handle) +static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v6_0_gart_disable(adev); @@ -924,21 +935,19 @@ static int gmc_v6_0_hw_fini(void *handle) return 0; } -static int gmc_v6_0_suspend(void *handle) +static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v6_0_hw_fini(adev); + gmc_v6_0_hw_fini(ip_block); return 0; } -static int gmc_v6_0_resume(void *handle) +static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; - r = gmc_v6_0_hw_init(adev); + r = gmc_v6_0_hw_init(ip_block); if (r) return r; @@ -950,6 +959,7 @@ static int gmc_v6_0_resume(void *handle) static bool gmc_v6_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | @@ -959,13 +969,13 @@ static bool gmc_v6_0_is_idle(void *handle) return true; } -static int gmc_v6_0_wait_for_idle(void *handle) +static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (gmc_v6_0_is_idle(handle)) + if (gmc_v6_0_is_idle(adev)) return 0; udelay(1); } @@ -973,9 +983,10 @@ static int gmc_v6_0_wait_for_idle(void *handle) } -static int gmc_v6_0_soft_reset(void *handle) +static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -992,7 +1003,8 @@ static int gmc_v6_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v6_0_mc_stop(adev); - if (gmc_v6_0_wait_for_idle(adev)) + + if (gmc_v6_0_wait_for_idle(ip_block)) 
dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); tmp = RREG32(mmSRBM_SOFT_RESET); @@ -1109,8 +1121,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { .soft_reset = gmc_v6_0_soft_reset, .set_clockgating_state = gmc_v6_0_set_clockgating_state, .set_powergating_state = gmc_v6_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 994432fb57ea..07f45f1a503a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -52,7 +52,7 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v7_0_wait_for_idle(void *handle); +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); @@ -921,9 +921,9 @@ static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v7_0_early_init(void *handle) +static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_set_gmc_funcs(adev); gmc_v7_0_set_irq_funcs(adev); @@ -940,9 +940,9 @@ static int gmc_v7_0_early_init(void *handle) return 0; } -static int gmc_v7_0_late_init(void *handle) +static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -968,10 +968,10 @@ static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static int gmc_v7_0_sw_init(void *handle) +static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1060,9 +1060,9 @@ static int gmc_v7_0_sw_init(void *handle) return 0; } -static int gmc_v7_0_sw_fini(void *handle) +static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1074,10 +1074,10 @@ static int gmc_v7_0_sw_fini(void *handle) return 0; } -static int gmc_v7_0_hw_init(void *handle) +static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v7_0_init_golden_registers(adev); @@ -1101,9 +1101,9 @@ static int gmc_v7_0_hw_init(void *handle) return 0; } -static int gmc_v7_0_hw_fini(void *handle) +static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v7_0_gart_disable(adev); @@ -1111,25 +1111,22 @@ static int gmc_v7_0_hw_fini(void *handle) return 0; } -static int gmc_v7_0_suspend(void *handle) +static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v7_0_hw_fini(adev); + gmc_v7_0_hw_fini(ip_block); return 0; } -static int 
gmc_v7_0_resume(void *handle) +static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v7_0_hw_init(adev); + r = gmc_v7_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1146,11 +1143,11 @@ static bool gmc_v7_0_is_idle(void *handle) return true; } -static int gmc_v7_0_wait_for_idle(void *handle) +static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1167,9 +1164,9 @@ static int gmc_v7_0_wait_for_idle(void *handle) } -static int gmc_v7_0_soft_reset(void *handle) +static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1351,8 +1348,6 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .soft_reset = gmc_v7_0_soft_reset, .set_clockgating_state = gmc_v7_0_set_clockgating_state, .set_powergating_state = gmc_v7_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 86488c052f82..12d5967ecd45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -53,7 +53,7 @@ static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); -static int gmc_v8_0_wait_for_idle(void *handle); +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; + struct amdgpu_ip_block *ip_block; - gmc_v8_0_wait_for_idle(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + gmc_v8_0_wait_for_idle(ip_block); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -426,6 +431,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; u32 tmp; int i, j; @@ -439,7 +445,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v8_0_wait_for_idle((void *)adev)) + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!ip_block) + return; + + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timed out !\n"); if (adev->mode_info.num_crtc) { @@ -474,7 +484,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); - if (gmc_v8_0_wait_for_idle((void *)adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for MC idle timed out !\n"); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -1027,9 +1037,9 @@ static int
gmc_v8_0_convert_vram_type(int mc_seq_vram_type) } } -static int gmc_v8_0_early_init(void *handle) +static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_set_gmc_funcs(adev); gmc_v8_0_set_irq_funcs(adev); @@ -1046,9 +1056,9 @@ static int gmc_v8_0_early_init(void *handle) return 0; } -static int gmc_v8_0_late_init(void *handle) +static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); @@ -1076,10 +1086,10 @@ static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) #define mmMC_SEQ_MISC0_FIJI 0xA71 -static int gmc_v8_0_sw_init(void *handle) +static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); @@ -1173,9 +1183,9 @@ static int gmc_v8_0_sw_init(void *handle) return 0; } -static int gmc_v8_0_sw_fini(void *handle) +static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1187,10 +1197,10 @@ static int gmc_v8_0_sw_fini(void *handle) return 0; } -static int gmc_v8_0_hw_init(void *handle) +static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v8_0_init_golden_registers(adev); @@ -1222,9 +1232,9 @@ static int gmc_v8_0_hw_init(void *handle) return 0; } -static int gmc_v8_0_hw_fini(void *handle) +static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v8_0_gart_disable(adev); @@ -1232,25 +1242,22 @@ static int gmc_v8_0_hw_fini(void *handle) return 0; } -static int gmc_v8_0_suspend(void *handle) +static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - gmc_v8_0_hw_fini(adev); + gmc_v8_0_hw_fini(ip_block); return 0; } -static int gmc_v8_0_resume(void *handle) +static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v8_0_hw_init(adev); + r = gmc_v8_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -1267,11 +1274,11 @@ static bool gmc_v8_0_is_idle(void *handle) return true; } -static int gmc_v8_0_wait_for_idle(void *handle) +static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1289,10 +1296,10 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static bool gmc_v8_0_check_soft_reset(void *handle) +static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__VMC_BUSY_MASK) @@ -1316,23 +1323,23 @@ static bool gmc_v8_0_check_soft_reset(void *handle) return false; } -static int gmc_v8_0_pre_soft_reset(void *handle) +static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; gmc_v8_0_mc_stop(adev); - if (gmc_v8_0_wait_for_idle(adev)) + if (gmc_v8_0_wait_for_idle(ip_block)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); return 0; } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->gmc.srbm_soft_reset) @@ -1361,9 +1368,9 @@ static int gmc_v8_0_soft_reset(void *handle) return 0; } -static int gmc_v8_0_post_soft_reset(void *handle) +static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->gmc.srbm_soft_reset) return 0; @@ -1715,8 +1722,6 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, .get_clockgating_state = gmc_v8_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7a45f3fdc734..50c5da3020cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -672,6 +672,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; + /* Only print L2 fault status if the status register could be read and + * contains useful information + */ + if (!status) + return 0; + if (!amdgpu_sriov_vf(adev)) WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); @@ -1390,14 +1396,44 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) } static enum amdgpu_memory_partition +gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } + + return AMDGPU_NPS1_PARTITION_MODE; +} + +static enum amdgpu_memory_partition gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) - return AMDGPU_NPS1_PARTITION_MODE; + return gmc_v9_0_query_vf_memory_partition(adev); return gmc_v9_0_get_memory_partition(adev, NULL); } +static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) +{ + if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && + adev->nbio.funcs->is_nps_switch_requested(adev)) { + adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS; + return true; + } + + return false; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, @@ -1409,6 +1445,8 @@ static const struct 
amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, + .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -1548,9 +1586,31 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) adev->gmc.xgmi.ras = &xgmi_ras; } -static int gmc_v9_0_early_init(void *handle) +static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->gmc.supported_nps_modes = 0; + + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) + return; + + /*TODO: Check PSP version also which supports NPS switch. Otherwise keep + * supported modes as 0. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + adev->gmc.supported_nps_modes = + BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + break; + default: + break; + } +} + +static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; /* * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined @@ -1604,9 +1664,9 @@ static int gmc_v9_0_early_init(void *handle) return 0; } -static int gmc_v9_0_late_init(void *handle) +static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_gmc_allocate_vm_inv_eng(adev); @@ -1903,6 +1963,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, switch (mode) { case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; case AMDGPU_NPS1_PARTITION_MODE: adev->gmc.num_mem_partitions = 1; break; @@ -1922,7 +1984,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, /* Use NPS range info, if populated */ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - adev->gmc.num_mem_partitions); + &adev->gmc.num_mem_partitions); if (!r) { l = 0; for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { @@ -1932,6 +1994,11 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, } } else { + if (!adev->gmc.num_mem_partitions) { + dev_err(adev->dev, + "Not able to detect NPS mode, fall back to NPS1"); + adev->gmc.num_mem_partitions = 1; + } /* Fallback to sw based calculation */ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; size /= adev->gmc.num_mem_partitions; @@ -1990,10 +2057,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) adev->gmc.vram_width = 128 * 64; } -static int gmc_v9_0_sw_init(void *handle) +static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned long inst_mask = adev->aid_mask; adev->gfxhub.funcs->init(adev); @@ -2168,6 +2235,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + gmc_v9_0_init_nps_details(adev); /* * number of VMs * VMID 0 is reserved for System @@ -2201,9 +2269,9 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -static int gmc_v9_0_sw_fini(void *handle) +static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device 
*adev = ip_block->adev; if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) @@ -2311,9 +2379,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_hw_init(void *handle) +static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool value; int i, r; @@ -2396,9 +2464,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) adev->mmhub.funcs->gart_disable(adev); } -static int gmc_v9_0_hw_fini(void *handle) +static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; gmc_v9_0_gart_disable(adev); @@ -2416,32 +2484,44 @@ static int gmc_v9_0_hw_fini(void *handle) if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, false); - amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + /* + * For minimal init, late_init is not called, hence VM fault/RAS irqs + * are not enabled. + */ + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { + amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - if (adev->gmc.ecc_irq.funcs && - amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + if (adev->gmc.ecc_irq.funcs && + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + } return 0; } -static int gmc_v9_0_suspend(void *handle) +static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return gmc_v9_0_hw_fini(adev); + return gmc_v9_0_hw_fini(ip_block); } -static int gmc_v9_0_resume(void *handle) +static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gmc_v9_0_hw_init(adev); + /* If a reset is done for NPS mode switch, read the memory range + * information again. 
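On the gmc_v9_0 side, an SR-IOV VF no longer reports NPS1 unconditionally: the NPS (NUMA-per-socket) mode is now inferred from how many memory partitions were actually enumerated, with a count of zero meaning the mode could not be detected yet. A condensed sketch of that mapping, mirroring gmc_v9_0_query_vf_memory_partition() above (the helper name here is illustrative):

static enum amdgpu_memory_partition nps_mode_from_count(u32 num_mem_partitions)
{
        switch (num_mem_partitions) {
        case 0:
                return UNKNOWN_MEMORY_PARTITION_MODE; /* not detected yet */
        case 1:
                return AMDGPU_NPS1_PARTITION_MODE;
        case 2:
                return AMDGPU_NPS2_PARTITION_MODE;
        case 4:
                return AMDGPU_NPS4_PARTITION_MODE;
        default:
                return AMDGPU_NPS1_PARTITION_MODE; /* conservative default */
        }
}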
+ */ + if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { + gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; + } + + r = gmc_v9_0_hw_init(ip_block); if (r) return r; - amdgpu_vmid_reset_all(adev); + amdgpu_vmid_reset_all(ip_block->adev); return 0; } @@ -2452,13 +2532,13 @@ static bool gmc_v9_0_is_idle(void *handle) return true; } -static int gmc_v9_0_wait_for_idle(void *handle) +static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* There is no need to wait for MC idle in GMC v9.*/ return 0; } -static int gmc_v9_0_soft_reset(void *handle) +static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX for emulation.*/ return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 07984f7c3ae7..7f45e93c0397 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -273,9 +273,9 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev, WREG32(mmIH_RB_RPTR, ih->rptr); } -static int iceland_ih_early_init(void *handle) +static int iceland_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -287,10 +287,10 @@ static int iceland_ih_early_init(void *handle) return 0; } -static int iceland_ih_sw_init(void *handle) +static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -301,9 +301,9 @@ static int iceland_ih_sw_init(void *handle) return r; } -static int iceland_ih_sw_fini(void *handle) +static int iceland_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -311,34 +311,28 @@ static int iceland_ih_sw_fini(void *handle) return 0; } -static int iceland_ih_hw_init(void *handle) +static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return iceland_ih_irq_init(adev); } -static int iceland_ih_hw_fini(void *handle) +static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - iceland_ih_irq_disable(adev); + iceland_ih_irq_disable(ip_block->adev); return 0; } -static int iceland_ih_suspend(void *handle) +static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_fini(adev); + return iceland_ih_hw_fini(ip_block); } -static int iceland_ih_resume(void *handle) +static int iceland_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return iceland_ih_hw_init(adev); + return iceland_ih_hw_init(ip_block); } static bool iceland_ih_is_idle(void *handle) @@ -352,11 +346,11 @@ static bool iceland_ih_is_idle(void *handle) return true; } -static int iceland_ih_wait_for_idle(void *handle) +static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev 
= ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -368,10 +362,10 @@ static int iceland_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int iceland_ih_soft_reset(void *handle) +static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -413,7 +407,6 @@ static int iceland_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs iceland_ih_ip_funcs = { .name = "iceland_ih", .early_init = iceland_ih_early_init, - .late_init = NULL, .sw_init = iceland_ih_sw_init, .sw_fini = iceland_ih_sw_fini, .hw_init = iceland_ih_hw_init, @@ -425,8 +418,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { .soft_reset = iceland_ih_soft_reset, .set_clockgating_state = iceland_ih_set_clockgating_state, .set_powergating_state = iceland_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs iceland_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 18a761d6ef33..38f953fd65d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -559,19 +559,19 @@ static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs; } -static int ih_v6_0_early_init(void *handle) +static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v6_0_set_interrupt_funcs(adev); ih_v6_0_set_self_irq_funcs(adev); return 0; } -static int ih_v6_0_sw_init(void *handle) +static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -614,19 +614,19 @@ static int ih_v6_0_sw_init(void *handle) return r; } -static int ih_v6_0_sw_fini(void *handle) +static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v6_0_hw_init(void *handle) +static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_0_irq_init(adev); if (r) @@ -635,27 +635,21 @@ static int ih_v6_0_hw_init(void *handle) return 0; } -static int ih_v6_0_hw_fini(void *handle) +static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_0_irq_disable(adev); + ih_v6_0_irq_disable(ip_block->adev); return 0; } -static int ih_v6_0_suspend(void *handle) +static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_fini(adev); + return ih_v6_0_hw_fini(ip_block); } -static int ih_v6_0_resume(void *handle) +static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_0_hw_init(adev); + return ih_v6_0_hw_init(ip_block); } static bool ih_v6_0_is_idle(void *handle) @@ -664,13 
+658,13 @@ static bool ih_v6_0_is_idle(void *handle) return true; } -static int ih_v6_0_wait_for_idle(void *handle) +static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_0_soft_reset(void *handle) +static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -785,7 +779,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .name = "ih_v6_0", .early_init = ih_v6_0_early_init, - .late_init = NULL, .sw_init = ih_v6_0_sw_init, .sw_fini = ih_v6_0_sw_fini, .hw_init = ih_v6_0_hw_init, @@ -798,8 +791,6 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .set_clockgating_state = ih_v6_0_set_clockgating_state, .set_powergating_state = ih_v6_0_set_powergating_state, .get_clockgating_state = ih_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 2e0469feca1e..61381e0c3795 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -532,9 +532,9 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs; } -static int ih_v6_1_early_init(void *handle) +static int ih_v6_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -547,10 +547,10 @@ static int ih_v6_1_early_init(void *handle) return 0; } -static int ih_v6_1_sw_init(void *handle) +static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -593,19 +593,19 @@ static int ih_v6_1_sw_init(void *handle) return r; } -static int ih_v6_1_sw_fini(void *handle) +static int ih_v6_1_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v6_1_hw_init(void *handle) +static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v6_1_irq_init(adev); if (r) @@ -614,27 +614,21 @@ static int ih_v6_1_hw_init(void *handle) return 0; } -static int ih_v6_1_hw_fini(void *handle) +static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v6_1_irq_disable(adev); + ih_v6_1_irq_disable(ip_block->adev); return 0; } -static int ih_v6_1_suspend(void *handle) +static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_fini(adev); + return ih_v6_1_hw_fini(ip_block); } -static int ih_v6_1_resume(void *handle) +static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v6_1_hw_init(adev); + return ih_v6_1_hw_init(ip_block); } static bool ih_v6_1_is_idle(void *handle) @@ -643,13 +637,13 @@ static bool ih_v6_1_is_idle(void *handle) return true; } -static int ih_v6_1_wait_for_idle(void 
*handle) +static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v6_1_soft_reset(void *handle) +static int ih_v6_1_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -768,7 +762,6 @@ static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .name = "ih_v6_1", .early_init = ih_v6_1_early_init, - .late_init = NULL, .sw_init = ih_v6_1_sw_init, .sw_fini = ih_v6_1_sw_fini, .hw_init = ih_v6_1_hw_init, @@ -781,8 +774,6 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .set_clockgating_state = ih_v6_1_set_clockgating_state, .set_powergating_state = ih_v6_1_set_powergating_state, .get_clockgating_state = ih_v6_1_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 6852081fcff2..d2428cf5d385 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -528,19 +528,19 @@ static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs; } -static int ih_v7_0_early_init(void *handle) +static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ih_v7_0_set_interrupt_funcs(adev); ih_v7_0_set_self_irq_funcs(adev); return 0; } -static int ih_v7_0_sw_init(void *handle) +static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, @@ -583,19 +583,19 @@ static int ih_v7_0_sw_init(void *handle) return r; } -static int ih_v7_0_sw_fini(void *handle) +static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int ih_v7_0_hw_init(void *handle) +static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = ih_v7_0_irq_init(adev); if (r) @@ -604,27 +604,21 @@ static int ih_v7_0_hw_init(void *handle) return 0; } -static int ih_v7_0_hw_fini(void *handle) +static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ih_v7_0_irq_disable(adev); + ih_v7_0_irq_disable(ip_block->adev); return 0; } -static int ih_v7_0_suspend(void *handle) +static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_fini(adev); + return ih_v7_0_hw_fini(ip_block); } -static int ih_v7_0_resume(void *handle) +static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return ih_v7_0_hw_init(adev); + return ih_v7_0_hw_init(ip_block); } static bool ih_v7_0_is_idle(void *handle) @@ -633,13 +627,13 @@ static bool ih_v7_0_is_idle(void *handle) return true; } -static int ih_v7_0_wait_for_idle(void *handle) +static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int ih_v7_0_soft_reset(void 
*handle) +static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -758,7 +752,6 @@ static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .name = "ih_v7_0", .early_init = ih_v7_0_early_init, - .late_init = NULL, .sw_init = ih_v7_0_sw_init, .sw_fini = ih_v7_0_sw_fini, .hw_init = ih_v7_0_hw_init, @@ -771,8 +764,6 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .set_clockgating_state = ih_v7_0_set_clockgating_state, .set_powergating_state = ih_v7_0_set_powergating_state, .get_clockgating_state = ih_v7_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 6e0e88076224..03b8b7cd5229 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -458,13 +458,13 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, /** * jpeg_v1_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -int jpeg_v1_0_early_init(void *handle) +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -478,12 +478,12 @@ int jpeg_v1_0_early_init(void *handle) /** * jpeg_v1_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -int jpeg_v1_0_sw_init(void *handle) +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -509,13 +509,13 @@ int jpeg_v1_0_sw_init(void *handle) /** * jpeg_v1_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
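Because the diff interleaves the removed and added kerneldoc lines, it helps to see one of these JPEG callbacks reassembled. After the patch, jpeg_v1_0_early_init() reads roughly as follows (reconstructed from the hunks above; the part of the body this diff does not touch is elided):

/**
 * jpeg_v1_0_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        adev->jpeg.num_jpeg_inst = 1;
        adev->jpeg.num_jpeg_rings = 1;

        /* ... remainder unchanged by this patch ... */

        return 0;
}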
* * JPEG free up sw allocation */ -void jpeg_v1_0_sw_fini(void *handle) +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_ring_fini(adev->jpeg.inst->ring_dec); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h index 9654d22e0376..097328635083 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -24,9 +24,9 @@ #ifndef __JPEG_V1_0_H__ #define __JPEG_V1_0_H__ -int jpeg_v1_0_early_init(void *handle); -int jpeg_v1_0_sw_init(void *handle); -void jpeg_v1_0_sw_fini(void *handle); +int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block); +int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block); +void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block); void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); #define JPEG_V1_REG_RANGE_START 0x8000 diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 41c0f8750dc1..d6823fb45d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -41,13 +41,13 @@ static int jpeg_v2_0_set_powergating_state(void *handle, /** * jpeg_v2_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_0_early_init(void *handle) +static int jpeg_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -61,13 +61,13 @@ static int jpeg_v2_0_early_init(void *handle) /** * jpeg_v2_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_0_sw_init(void *handle) +static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -104,14 +104,14 @@ static int jpeg_v2_0_sw_init(void *handle) /** * jpeg_v2_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v2_0_sw_fini(void *handle) +static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -125,12 +125,12 @@ static int jpeg_v2_0_sw_fini(void *handle) /** * jpeg_v2_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* */ -static int jpeg_v2_0_hw_init(void *handle) +static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -142,13 +142,13 @@ static int jpeg_v2_0_hw_init(void *handle) /** * jpeg_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_0_hw_fini(void *handle) +static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -162,20 +162,19 @@ static int jpeg_v2_0_hw_fini(void *handle) /** * jpeg_v2_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_0_suspend(void *handle) +static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_0_hw_fini(adev); + r = jpeg_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -183,20 +182,19 @@ static int jpeg_v2_0_suspend(void *handle) /** * jpeg_v2_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v2_0_resume(void *handle) +static int jpeg_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_0_hw_init(adev); + r = jpeg_v2_0_hw_init(ip_block); return r; } @@ -666,9 +664,9 @@ static bool jpeg_v2_0_is_idle(void *handle) UVD_JRBC_STATUS__RB_JOB_DONE_MASK); } -static int jpeg_v2_0_wait_for_idle(void *handle) +static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -744,7 +742,6 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .name = "jpeg_v2_0", .early_init = jpeg_v2_0_early_init, - .late_init = NULL, .sw_init = jpeg_v2_0_sw_init, .sw_fini = jpeg_v2_0_sw_fini, .hw_init = jpeg_v2_0_hw_init, @@ -753,14 +750,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .resume = jpeg_v2_0_resume, .is_idle = jpeg_v2_0_is_idle, .wait_for_idle = jpeg_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_0_set_clockgating_state, .set_powergating_state = jpeg_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index eedb9a829d95..5063a38801d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -50,13 +50,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v2_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v2_5_early_init(void *handle) +static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; int i; @@ -81,15 +81,15 @@ static int jpeg_v2_5_early_init(void *handle) /** * jpeg_v2_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v2_5_sw_init(void *handle) +static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) @@ -153,14 +153,14 @@ static int jpeg_v2_5_sw_init(void *handle) /** * jpeg_v2_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v2_5_sw_fini(void *handle) +static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_jpeg_suspend(adev); if (r) @@ -174,12 +174,12 @@ static int jpeg_v2_5_sw_fini(void *handle) /** * jpeg_v2_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v2_5_hw_init(void *handle) +static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -202,13 +202,13 @@ static int jpeg_v2_5_hw_init(void *handle) /** * jpeg_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v2_5_hw_fini(void *handle) +static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -231,20 +231,19 @@ static int jpeg_v2_5_hw_fini(void *handle) /** * jpeg_v2_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v2_5_suspend(void *handle) +static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v2_5_hw_fini(adev); + r = jpeg_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -252,20 +251,19 @@ static int jpeg_v2_5_suspend(void *handle) /** * jpeg_v2_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
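/*
 * Dropping the explicit NULL members (.late_init, .soft_reset,
 * .dump_ip_state, ...) from the amd_ip_funcs tables is behavior-neutral:
 * with C designated initializers, any member that is not named is
 * zero-initialized, so an absent callback still reads back as NULL.
 * Illustrative sketch with a hypothetical ops table:
 */
struct example_funcs {
	int (*init)(void);
	int (*fini)(void);
};

static const struct example_funcs example_ops = {
	.init = example_init,	/* hypothetical */
	/* .fini is implicitly NULL; no need to spell it out */
};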
* * Resume firmware and hw init JPEG block */ -static int jpeg_v2_5_resume(void *handle) +static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v2_5_hw_init(adev); + r = jpeg_v2_5_hw_init(ip_block); return r; } @@ -501,9 +499,9 @@ static bool jpeg_v2_5_is_idle(void *handle) return ret; } -static int jpeg_v2_5_wait_for_idle(void *handle) +static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -615,7 +613,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .name = "jpeg_v2_5", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -624,20 +621,13 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .name = "jpeg_v2_6", .early_init = jpeg_v2_5_early_init, - .late_init = NULL, .sw_init = jpeg_v2_5_sw_init, .sw_fini = jpeg_v2_5_sw_fini, .hw_init = jpeg_v2_5_hw_init, @@ -646,14 +636,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .resume = jpeg_v2_5_resume, .is_idle = jpeg_v2_5_is_idle, .wait_for_idle = jpeg_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index b1e7fd25afbc..10adbb7cbf53 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -42,13 +42,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle, /** * jpeg_v3_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v3_0_early_init(void *handle) +static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 harvest; @@ -75,13 +75,13 @@ static int jpeg_v3_0_early_init(void *handle) /** * jpeg_v3_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
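/*
 * The converted wait_for_idle callbacks poll the JRBC status register
 * until the job-done bit is set. A sketch of the single-instance form,
 * modeled on jpeg_v3_0_wait_for_idle below; the multi-instance variants
 * wrap the same macro in a per-instance loop:
 */
static int example_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
				  UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				  UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}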
* * Load firmware and sw initialization */ -static int jpeg_v3_0_sw_init(void *handle) +static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -118,13 +118,13 @@ static int jpeg_v3_0_sw_init(void *handle) /** * jpeg_v3_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v3_0_sw_fini(void *handle) +static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); @@ -139,12 +139,12 @@ static int jpeg_v3_0_sw_fini(void *handle) /** * jpeg_v3_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v3_0_hw_init(void *handle) +static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -156,13 +156,13 @@ static int jpeg_v3_0_hw_init(void *handle) /** * jpeg_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v3_0_hw_fini(void *handle) +static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -176,20 +176,19 @@ static int jpeg_v3_0_hw_fini(void *handle) /** * jpeg_v3_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v3_0_suspend(void *handle) +static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v3_0_hw_fini(adev); + r = jpeg_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -197,20 +196,19 @@ static int jpeg_v3_0_suspend(void *handle) /** * jpeg_v3_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init JPEG block */ -static int jpeg_v3_0_resume(void *handle) +static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v3_0_hw_init(adev); + r = jpeg_v3_0_hw_init(ip_block); return r; } @@ -459,9 +457,9 @@ static bool jpeg_v3_0_is_idle(void *handle) return ret; } -static int jpeg_v3_0_wait_for_idle(void *handle) +static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -535,7 +533,6 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .name = "jpeg_v3_0", .early_init = jpeg_v3_0_early_init, - .late_init = NULL, .sw_init = jpeg_v3_0_sw_init, .sw_fini = jpeg_v3_0_sw_fini, .hw_init = jpeg_v3_0_hw_init, @@ -544,14 +541,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .resume = jpeg_v3_0_resume, .is_idle = jpeg_v3_0_is_idle, .wait_for_idle = jpeg_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v3_0_set_clockgating_state, .set_powergating_state = jpeg_v3_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 6c5c1a68a9b7..193dfac5dc76 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -48,13 +48,13 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring); /** * jpeg_v4_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_early_init(void *handle) +static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; @@ -70,13 +70,13 @@ static int jpeg_v4_0_early_init(void *handle) /** * jpeg_v4_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_sw_init(void *handle) +static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -123,6 +123,12 @@ static int jpeg_v4_0_sw_init(void *handle) r = amdgpu_jpeg_ras_sw_init(adev); if (r) return r; + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } @@ -130,19 +136,20 @@ static int jpeg_v4_0_sw_init(void *handle) /** * jpeg_v4_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
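/*
 * jpeg_v4_0_sw_init above starts advertising the supported reset types
 * and exposing them through a sysfs mask; sw_fini gains the matching
 * teardown so init and fini stay symmetric. The pairing, sketched with
 * error handling elided:
 */
/* in sw_init: */
adev->jpeg.supported_reset =
	amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

/* in sw_fini, after amdgpu_jpeg_suspend() and before amdgpu_jpeg_sw_fini(): */
amdgpu_jpeg_sysfs_reset_mask_fini(adev);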
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_sw_fini(void *handle) +static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -151,12 +158,12 @@ static int jpeg_v4_0_sw_fini(void *handle) /** * jpeg_v4_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_hw_init(void *handle) +static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -187,13 +194,13 @@ static int jpeg_v4_0_hw_init(void *handle) /** * jpeg_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_hw_fini(void *handle) +static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); if (!amdgpu_sriov_vf(adev)) { @@ -210,20 +217,19 @@ static int jpeg_v4_0_hw_fini(void *handle) /** * jpeg_v4_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_suspend(void *handle) +static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_hw_fini(adev); + r = jpeg_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -231,20 +237,19 @@ static int jpeg_v4_0_suspend(void *handle) /** * jpeg_v4_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
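/*
 * Every JPEG variant in this series keeps the same suspend/resume
 * symmetry: suspend is hw_fini followed by amdgpu_jpeg_suspend(), resume
 * is amdgpu_jpeg_resume() followed by hw_init, and the ip_block pointer
 * is forwarded so neither path needs a cast. A sketch of the suspend
 * half (hypothetical names):
 */
static int example_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = example_hw_fini(ip_block);	/* stop the hardware first */
	if (r)
		return r;

	return amdgpu_jpeg_suspend(ip_block->adev);	/* then save state */
}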
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_resume(void *handle) +static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_hw_init(adev); + r = jpeg_v4_0_hw_init(ip_block); return r; } @@ -621,9 +626,9 @@ static bool jpeg_v4_0_is_idle(void *handle) return ret; } -static int jpeg_v4_0_wait_for_idle(void *handle) +static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -702,7 +707,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .name = "jpeg_v4_0", .early_init = jpeg_v4_0_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_sw_init, .sw_fini = jpeg_v4_0_sw_fini, .hw_init = jpeg_v4_0_hw_init, @@ -711,14 +715,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .resume = jpeg_v4_0_resume, .is_idle = jpeg_v4_0_is_idle, .wait_for_idle = jpeg_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_set_clockgating_state, .set_powergating_state = jpeg_v4_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 86958cb2c2ab..67b51bcbacd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -68,13 +68,13 @@ static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) /** * jpeg_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_3_early_init(void *handle) +static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS; @@ -88,13 +88,13 @@ static int jpeg_v4_0_3_early_init(void *handle) /** * jpeg_v4_0_3_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_3_sw_init(void *handle) +static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -159,25 +159,33 @@ static int jpeg_v4_0_3_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_3_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_3_sw_fini(void *handle) +static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -299,12 +307,12 @@ static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev) /** * jpeg_v4_0_3_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_3_hw_init(void *handle) +static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r, jpeg_inst; @@ -358,13 +366,13 @@ static int jpeg_v4_0_3_hw_init(void *handle) /** * jpeg_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_3_hw_fini(void *handle) +static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; cancel_delayed_work_sync(&adev->jpeg.idle_work); @@ -380,20 +388,19 @@ static int jpeg_v4_0_3_hw_fini(void *handle) /** * jpeg_v4_0_3_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_3_suspend(void *handle) +static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_3_hw_fini(adev); + r = jpeg_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -401,20 +408,19 @@ static int jpeg_v4_0_3_suspend(void *handle) /** * jpeg_v4_0_3_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
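/*
 * The hw_fini paths cancel the delayed idle-work item before gating the
 * block, so a late idle handler cannot race with power-off. The ordering,
 * sketched; the powergating check mirrors these drivers but the helper
 * name is hypothetical:
 */
static int example_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);	/* no idle handler past this point */

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
		example_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}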
* * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_3_resume(void *handle) +static int jpeg_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_3_hw_init(adev); + r = jpeg_v4_0_3_hw_init(ip_block); return r; } @@ -674,11 +680,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80004000); + } } /** @@ -694,11 +701,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x62a04); - } - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00004000); + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00004000); + } } /** @@ -743,14 +751,6 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); amdgpu_ring_write(ring, 0); @@ -929,9 +929,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle) return ret; } -static int jpeg_v4_0_3_wait_for_idle(void *handle) +static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret = 0; int i, j; @@ -1058,7 +1058,6 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .name = "jpeg_v4_0_3", .early_init = jpeg_v4_0_3_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_3_sw_init, .sw_fini = jpeg_v4_0_3_sw_fini, .hw_init = jpeg_v4_0_3_hw_init, @@ -1067,14 +1066,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .resume = jpeg_v4_0_3_resume, .is_idle = jpeg_v4_0_3_is_idle, .wait_for_idle = jpeg_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state, .set_powergating_state = jpeg_v4_0_3_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { @@ -1088,7 +1081,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */ - 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ + 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */ 8 + 16, .emit_ib_size = 22, /* 
jpeg_v4_0_3_dec_ring_emit_ib */ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 44eeed445ea9..b48e2412e6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -61,13 +61,13 @@ static int amdgpu_ih_clientid_jpeg[] = { /** * jpeg_v4_0_5_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int jpeg_v4_0_5_early_init(void *handle) +static int jpeg_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { case IP_VERSION(4, 0, 5): @@ -94,13 +94,13 @@ static int jpeg_v4_0_5_early_init(void *handle) /** * jpeg_v4_0_5_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v4_0_5_sw_init(void *handle) +static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -153,25 +153,33 @@ static int jpeg_v4_0_5_sw_init(void *handle) adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH); } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } /** * jpeg_v4_0_5_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v4_0_5_sw_fini(void *handle) +static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -180,12 +188,12 @@ static int jpeg_v4_0_5_sw_fini(void *handle) /** * jpeg_v4_0_5_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v4_0_5_hw_init(void *handle) +static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r = 0; @@ -210,13 +218,13 @@ static int jpeg_v4_0_5_hw_init(void *handle) /** * jpeg_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
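/*
 * The multi-instance variants (v2_5, v4_0_5) walk every JPEG instance
 * and skip the ones fused off by harvesting. The recurring idiom,
 * sketched:
 */
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
	if (adev->jpeg.harvest_config & (1 << i))
		continue;	/* instance harvested, nothing to program */
	/* per-instance ring/irq setup goes here */
}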
* * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v4_0_5_hw_fini(void *handle) +static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -237,20 +245,19 @@ static int jpeg_v4_0_5_hw_fini(void *handle) /** * jpeg_v4_0_5_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend JPEG block */ -static int jpeg_v4_0_5_suspend(void *handle) +static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v4_0_5_hw_fini(adev); + r = jpeg_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -258,20 +265,19 @@ static int jpeg_v4_0_5_suspend(void *handle) /** * jpeg_v4_0_5_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v4_0_5_resume(void *handle) +static int jpeg_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v4_0_5_hw_init(adev); + r = jpeg_v4_0_5_hw_init(ip_block); return r; } @@ -637,9 +643,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle) return ret; } -static int jpeg_v4_0_5_wait_for_idle(void *handle) +static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { @@ -743,7 +749,6 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, - .late_init = NULL, .sw_init = jpeg_v4_0_5_sw_init, .sw_fini = jpeg_v4_0_5_sw_fini, .hw_init = jpeg_v4_0_5_hw_init, @@ -752,14 +757,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .resume = jpeg_v4_0_5_resume, .is_idle = jpeg_v4_0_5_is_idle, .wait_for_idle = jpeg_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_5_set_clockgating_state, .set_powergating_state = jpeg_v4_0_5_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index d662aa841f97..686f9605239d 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -42,13 +42,13 @@ static int jpeg_v5_0_0_set_powergating_state(void *handle, /** * jpeg_v5_0_0_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Set ring and irq function pointers */ -static int jpeg_v5_0_0_early_init(void *handle) +static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; @@ -62,13 +62,13 @@ static int jpeg_v5_0_0_early_init(void *handle) /** * jpeg_v5_0_0_sw_init - sw init for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int jpeg_v5_0_0_sw_init(void *handle) +static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r; @@ -100,25 +100,32 @@ static int jpeg_v5_0_0_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + /* TODO: Add queue reset mask when FW fully supports it */ + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); + if (r) + return r; return 0; } /** * jpeg_v5_0_0_sw_fini - sw fini for JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * JPEG suspend and free up sw allocation */ -static int jpeg_v5_0_0_sw_fini(void *handle) +static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_jpeg_suspend(adev); if (r) return r; + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); return r; @@ -127,12 +134,12 @@ static int jpeg_v5_0_0_sw_fini(void *handle) /** * jpeg_v5_0_0_hw_init - start and test JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * */ -static int jpeg_v5_0_0_hw_init(void *handle) +static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; int r; @@ -153,13 +160,13 @@ static int jpeg_v5_0_0_hw_init(void *handle) /** * jpeg_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the JPEG block, mark ring as not ready any more */ -static int jpeg_v5_0_0_hw_fini(void *handle) +static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -173,20 +180,19 @@ static int jpeg_v5_0_0_hw_fini(void *handle) /** * jpeg_v5_0_0_suspend - suspend JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
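/*
 * hw_init points the NBIO doorbell aperture at the JPEG decode ring
 * before ring-testing it; JPEG reuses the VCN doorbell-range hook. A
 * sketch under the single-ring layout used here (adev, r, and the
 * trailing instance argument of 0 are assumptions for illustration):
 */
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;

adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				     ring->doorbell_index, 0);
r = amdgpu_ring_test_helper(ring);	/* hw_init returns this status */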
* * HW fini and suspend JPEG block */ -static int jpeg_v5_0_0_suspend(void *handle) +static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = jpeg_v5_0_0_hw_fini(adev); + r = jpeg_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_jpeg_suspend(adev); + r = amdgpu_jpeg_suspend(ip_block->adev); return r; } @@ -194,20 +200,19 @@ static int jpeg_v5_0_0_suspend(void *handle) /** * jpeg_v5_0_0_resume - resume JPEG block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init JPEG block */ -static int jpeg_v5_0_0_resume(void *handle) +static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_jpeg_resume(adev); + r = amdgpu_jpeg_resume(ip_block->adev); if (r) return r; - r = jpeg_v5_0_0_hw_init(adev); + r = jpeg_v5_0_0_hw_init(ip_block); return r; } @@ -546,9 +551,9 @@ static bool jpeg_v5_0_0_is_idle(void *handle) return ret; } -static int jpeg_v5_0_0_wait_for_idle(void *handle) +static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, @@ -622,7 +627,6 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, - .late_init = NULL, .sw_init = jpeg_v5_0_0_sw_init, .sw_fini = jpeg_v5_0_0_sw_fini, .hw_init = jpeg_v5_0_0_hw_init, @@ -631,14 +635,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .resume = jpeg_v5_0_0_resume, .is_idle = jpeg_v5_0_0_is_idle, .wait_for_idle = jpeg_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state, .set_powergating_state = jpeg_v5_0_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 231a3d490ea8..9c905b9e9376 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -55,8 +55,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin"); -static int mes_v11_0_hw_init(void *handle); -static int mes_v11_0_hw_fini(void *handle); +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -366,7 +366,7 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ uint32_t queue_id, uint32_t vmid) { struct amdgpu_device *adev = mes->adev; - uint32_t value; + uint32_t value, reg; int i, r = 0; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); @@ -424,6 +424,31 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + } else if (queue_type == AMDGPU_RING_TYPE_SDMA) { + dev_info(adev->dev, "reset sdma queue 
(%d:%d:%d)\n", + me_id, pipe_id, queue_id); + switch (me_id) { + case 1: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ); + break; + case 0: + default: + reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ); + break; + } + + value = 1 << queue_id; + WREG32(reg, value); + /* wait for queue reset done */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(reg) & value)) + break; + udelay(1); + } + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "failed to wait on sdma queue reset done\n"); + r = -ETIMEDOUT; + } } amdgpu_gfx_rlc_exit_safe_mode(adev, 0); @@ -619,6 +644,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) { + dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n"); + return -EINVAL; + } + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -683,6 +720,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes->event_log_gpu_addr; } + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -883,6 +923,16 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) uint32_t pipe, data = 0; if (enable) { + if (amdgpu_mes_log_enable) { + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO, + lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI, + upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE)); + dev_info(adev->dev, "Setup CP MES MSCRATCH address: 0x%x. 
0x%x\n", + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI), + RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO)); + } + data = RREG32_SOC15(GC, 0, regCP_MES_CNTL); data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1); data = REG_SET_FIELD(data, CP_MES_CNTL, @@ -1336,16 +1386,16 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v11_0_sw_init(void *handle) +static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v11_0_funcs; adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init; adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE; r = amdgpu_mes_init(adev); if (r) @@ -1377,9 +1427,9 @@ static int mes_v11_0_sw_init(void *handle) return 0; } -static int mes_v11_0_sw_fini(void *handle) +static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1473,6 +1523,7 @@ static void mes_v11_0_kiq_clear(struct amdgpu_device *adev) static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { @@ -1496,6 +1547,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1506,7 +1563,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) adev->mes.enable_legacy_queue_map = false; if (adev->mes.enable_legacy_queue_map) { - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) goto failure; } @@ -1514,7 +1571,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } @@ -1535,10 +1592,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v11_0_hw_init(void *handle) +static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1590,13 +1647,13 @@ out: return 0; failure: - mes_v11_0_hw_fini(adev); + mes_v11_0_hw_fini(ip_block); return r; } -static int mes_v11_0_hw_fini(void *handle) +static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_is_mes_info_enable(adev)) { amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr, &adev->mes.resource_1_addr); @@ -1604,33 +1661,31 @@ static int mes_v11_0_hw_fini(void *handle) return 0; } -static int mes_v11_0_suspend(void *handle) +static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v11_0_hw_fini(adev); + 
return mes_v11_0_hw_fini(ip_block); } -static int mes_v11_0_resume(void *handle) +static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mes_v11_0_hw_init(adev); + r = mes_v11_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v11_0_early_init(void *handle) +static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1644,9 +1699,9 @@ static int mes_v11_0_early_init(void *handle) return 0; } -static int mes_v11_0_late_init(void *handle) +static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && @@ -1666,8 +1721,6 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .hw_fini = mes_v11_0_hw_fini, .suspend = mes_v11_0_suspend, .resume = mes_v11_0_resume, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version mes_v11_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index b3175ff676f3..9ecc5d61e49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -39,8 +39,8 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin"); -static int mes_v12_0_hw_init(void *handle); -static int mes_v12_0_hw_fini(void *handle); +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block); +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block); static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); @@ -531,6 +531,14 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; + case MES_MISC_OP_CHANGE_CONFIG: + misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; + misc_pkt.change_config.opcode = + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; + misc_pkt.change_config.option.bits.limit_single_process = + input->change_config.option.limit_single_process; + break; + default: DRM_ERROR("unsupported misc op (%d) \n", input->op); return -EINVAL; @@ -624,6 +632,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE; } + if (enforce_isolation) + mes_set_hw_res_pkt.limit_single_process = 1; + return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); @@ -1326,9 +1337,9 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev, return 0; } -static int mes_v12_0_sw_init(void *handle) +static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; adev->mes.funcs = &mes_v12_0_funcs; @@ -1362,9 +1373,9 @@ static int 
mes_v12_0_sw_init(void *handle) return 0; } -static int mes_v12_0_sw_fini(void *handle) +static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1452,6 +1463,7 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring) static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (adev->enable_uni_mes) mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]); @@ -1479,6 +1491,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) mes_v12_0_enable(adev, true); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); + if (unlikely(!ip_block)) { + dev_err(adev->dev, "Failed to get MES handle\n"); + return -EINVAL; + } + r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); if (r) goto failure; @@ -1492,7 +1510,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) } if (adev->mes.enable_legacy_queue_map) { - r = mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) goto failure; } @@ -1500,7 +1518,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) return r; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } @@ -1522,10 +1540,10 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev) return 0; } -static int mes_v12_0_hw_init(void *handle) +static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->mes.ring[0].sched.ready) goto out; @@ -1584,42 +1602,40 @@ out: return 0; failure: - mes_v12_0_hw_fini(adev); + mes_v12_0_hw_fini(ip_block); return r; } -static int mes_v12_0_hw_fini(void *handle) +static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int mes_v12_0_suspend(void *handle) +static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_mes_suspend(adev); + r = amdgpu_mes_suspend(ip_block->adev); if (r) return r; - return mes_v12_0_hw_fini(adev); + return mes_v12_0_hw_fini(ip_block); } -static int mes_v12_0_resume(void *handle) +static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mes_v12_0_hw_init(adev); + r = mes_v12_0_hw_init(ip_block); if (r) return r; - return amdgpu_mes_resume(adev); + return amdgpu_mes_resume(ip_block->adev); } -static int mes_v12_0_early_init(void *handle) +static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int pipe, r; for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { @@ -1631,9 +1647,9 @@ static int mes_v12_0_early_init(void *handle) return 0; } -static int mes_v12_0_late_init(void *handle) +static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* it's only intended for use in mes_self_test case, not for s0ix and reset */ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index e3ddd22aa172..e9a6f33ca710 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -229,6 +229,52 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) 0); } +static void mmhub_v1_0_init_saw(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + uint32_t tmp; + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + lower_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + upper_32_bits(pt_base >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + + /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */ + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + + /* Program SAW CONTEXT0 CNTL */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL); + tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET; + tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET); + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp); + + /* Disable all Contexts except Context0 */ + tmp = 0xfffe; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp); + + /* Program SAW CNTL4 */ + tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4); + tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET; + tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET; + WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp); +} + static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; @@ -283,6 +329,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); } + + if (amdgpu_ip_version(adev, ISP_HWIP, 0)) + mmhub_v1_0_init_saw(adev); } static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index f47bd7ada4d7..4dcb72d1bdda 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { + int r = 0; u32 reg; reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); - if (reg != event) + if (reg == IDH_FAIL) + r = -EINVAL; + else if (reg != event) return -ENOENT; xgpu_nv_mailbox_send_ack(adev); - return 0; + return r; } static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) @@ -178,6 +181,9 @@ send_request: if (data1 != 0) event = IDH_RAS_POISON_READY; break; + case IDH_REQ_RAS_ERROR_COUNT: + event = IDH_RAS_ERROR_COUNT_READY; + break; default: break; } @@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev) return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF); } +static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev) +{ + return 
xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .trans_msg = xgpu_nv_mailbox_trans_msg, .ras_poison_handler = xgpu_nv_ras_poison_handler, .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, + .req_ras_err_count = xgpu_nv_req_ras_err_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 1d099ffb3a5a..9d61d76e1bf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -40,6 +40,7 @@ enum idh_request { IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, IDH_RAS_POISON = 202, + IDH_REQ_RAS_ERROR_COUNT = 203, }; enum idh_event { @@ -54,6 +55,8 @@ enum idh_event { IDH_RAS_POISON_READY, IDH_PF_SOFT_FLR_NOTIFICATION, IDH_RAS_ERROR_DETECTED, + IDH_RAS_ERROR_COUNT_READY = 11, + IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index b281462093f1..0820ed62e2e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -542,19 +542,19 @@ static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs; } -static int navi10_ih_early_init(void *handle) +static int navi10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; navi10_ih_set_interrupt_funcs(adev); navi10_ih_set_self_irq_funcs(adev); return 0; } -static int navi10_ih_sw_init(void *handle) +static int navi10_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -593,43 +593,37 @@ static int navi10_ih_sw_init(void *handle) return r; } -static int navi10_ih_sw_fini(void *handle) +static int navi10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int navi10_ih_hw_init(void *handle) +static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return navi10_ih_irq_init(adev); } -static int navi10_ih_hw_fini(void *handle) +static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - navi10_ih_irq_disable(adev); + navi10_ih_irq_disable(ip_block->adev); return 0; } -static int navi10_ih_suspend(void *handle) +static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_fini(adev); + return navi10_ih_hw_fini(ip_block); } -static int navi10_ih_resume(void *handle) +static int navi10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return navi10_ih_hw_init(adev); + return navi10_ih_hw_init(ip_block); } static bool navi10_ih_is_idle(void *handle) @@ -638,13 +632,13 @@ static bool navi10_ih_is_idle(void *handle) return true; } -static int navi10_ih_wait_for_idle(void *handle) +static int 
navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int navi10_ih_soft_reset(void *handle) +static int navi10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; @@ -700,7 +694,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs navi10_ih_ip_funcs = { .name = "navi10_ih", .early_init = navi10_ih_early_init, - .late_init = NULL, .sw_init = navi10_ih_sw_init, .sw_fini = navi10_ih_sw_fini, .hw_init = navi10_ih_hw_init, @@ -713,8 +706,6 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { .set_clockgating_state = navi10_ih_set_clockgating_state, .set_powergating_state = navi10_ih_set_powergating_state, .get_clockgating_state = navi10_ih_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs navi10_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index a5b60c9a2418..c88284ff92d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -68,6 +68,7 @@ #define SDMA_SUBOP_POLL_REG_WRITE_MEM 1 #define SDMA_SUBOP_POLL_DBIT_WRITE_MEM 2 #define SDMA_SUBOP_POLL_MEM_VERIFY 3 +#define SDMA_SUBOP_VM_INVALIDATION 4 #define HEADER_AGENT_DISPATCH 4 #define HEADER_BARRIER 5 #define SDMA_OP_AQL_COPY 0 @@ -4041,6 +4042,69 @@ /* +** Definitions for SDMA_PKT_VM_INVALIDATION packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 +#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) + +/*define for gfx_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift 16 +#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift) + +/*define for mm_eng_id field*/ +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_offset 0 +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift 24 +#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift) + +/*define for INVALIDATEREQ word*/ +/*define for invalidatereq field*/ +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 +#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) (((x) & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) + +/*define for ADDRESSRANGELO word*/ +/*define for 
addressrangelo field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) + +/*define for ADDRESSRANGEHI word*/ +/*define for invalidateack field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) + +/*define for addressrangehi field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) + +/*define for reserved field*/ +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 +#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) + + +/* ** Definitions for SDMA_PKT_ATOMIC packet */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 8d80df94bd8b..a26a9be58eac 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -414,8 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device /* ras_controller_int is dedicated for nbif ras error, * not the global interrupt for sync flood */ - amdgpu_ras_set_fed(adev, true); - amdgpu_ras_reset_gpu(adev); + amdgpu_ras_global_ras_isr(adev); } amdgpu_ras_error_data_fini(&err_data); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index d1bd79bbae53..8a0a63ac88d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -401,6 +401,17 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) return px; } +static bool nbio_v7_9_is_nps_switch_requested(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); + tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, + CHANGE_STATUE); + + /* 0x8 - NPS switch requested */ + return (tmp == 0x8); +} static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev, u32 *supp_modes) { @@ -508,6 +519,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, + .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, 
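/* Illustrative sketch only, not part of this patch: a reset-path caller of the new hook would be expected to NULL-check it first, since older nbio funcs tables do not provide the callback:
 *
 *	if (adev->nbio.funcs->is_nps_switch_requested &&
 *	    adev->nbio.funcs->is_nps_switch_requested(adev))
 *		dev_info(adev->dev, "NPS mode switch requested\n");
 */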
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 73065a85e0d2..3bad565ded73 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -634,9 +634,9 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = { .query_video_codecs = &nv_query_video_codecs, }; -static int nv_common_early_init(void *handle) +static int nv_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -944,9 +944,9 @@ static int nv_common_early_init(void *handle) return 0; } -static int nv_common_late_init(void *handle) +static int nv_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -973,9 +973,9 @@ static int nv_common_late_init(void *handle) return 0; } -static int nv_common_sw_init(void *handle) +static int nv_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -983,14 +983,9 @@ static int nv_common_sw_init(void *handle) return 0; } -static int nv_common_sw_fini(void *handle) +static int nv_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int nv_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->nbio.funcs->apply_lc_spc_mode_wa) adev->nbio.funcs->apply_lc_spc_mode_wa(adev); @@ -1014,9 +1009,9 @@ static int nv_common_hw_init(void *handle) return 0; } -static int nv_common_hw_fini(void *handle) +static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because nv_enable_doorbell_aperture @@ -1029,18 +1024,14 @@ static int nv_common_hw_fini(void *handle) return 0; } -static int nv_common_suspend(void *handle) +static int nv_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return nv_common_hw_fini(adev); + return nv_common_hw_fini(ip_block); } -static int nv_common_resume(void *handle) +static int nv_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return nv_common_hw_init(adev); + return nv_common_hw_init(ip_block); } static bool nv_common_is_idle(void *handle) @@ -1048,16 +1039,6 @@ static bool nv_common_is_idle(void *handle) return true; } -static int nv_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int nv_common_soft_reset(void *handle) -{ - return 0; -} - static int nv_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1115,17 +1096,12 @@ static const struct amd_ip_funcs nv_common_ip_funcs = { .early_init = nv_common_early_init, .late_init = nv_common_late_init, .sw_init = nv_common_sw_init, - .sw_fini = nv_common_sw_fini, .hw_init = nv_common_hw_init, .hw_fini = nv_common_hw_fini, .suspend = nv_common_suspend, .resume = 
nv_common_resume, .is_idle = nv_common_is_idle, - .wait_for_idle = nv_common_wait_for_idle, - .soft_reset = nv_common_soft_reset, .set_clockgating_state = nv_common_set_clockgating_state, .set_powergating_state = nv_common_set_powergating_state, .get_clockgating_state = nv_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 37b5ddd6f13b..f4a91b126c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -103,6 +103,10 @@ enum psp_gfx_cmd_id GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */ GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */ GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */ + /* IDs of performance monitoring/profiling */ + GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ + /* Dynamic memory partitioning (NPS mode change) */ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ }; /* PSP boot config sub-commands */ @@ -351,6 +355,20 @@ struct psp_gfx_cmd_sriov_spatial_part { uint32_t override_this_aid; }; +/* Structure for SQ performance monitoring/profiling enable/disable */ +struct psp_gfx_cmd_config_sq_perfmon { + uint32_t gfx_xcp_mask; + uint8_t core_override; + uint8_t reg_override; + uint8_t perfmon_override; + uint8_t reserved[5]; +}; + +struct psp_gfx_cmd_fb_memory_part { + uint32_t mode; /* requested NPS mode */ + uint32_t resvd; +}; + /* All GFX ring buffer commands. */ union psp_gfx_commands { @@ -365,6 +383,8 @@ union psp_gfx_commands struct psp_gfx_cmd_load_toc cmd_load_toc; struct psp_gfx_cmd_boot_cfg boot_cfg; struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part; + struct psp_gfx_cmd_config_sq_perfmon config_sq_perfmon; + struct psp_gfx_cmd_fb_memory_part cmd_memory_part; }; struct psp_gfx_uresp_reserved diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 51e470e8d67d..c4b775aaee9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -823,6 +823,30 @@ static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp) return (pmfw_ver < 0x557300); } +static bool psp_v13_0_is_reload_needed(struct psp_context *psp) +{ + uint32_t ucode_ver; + + if (!psp_v13_0_is_sos_alive(psp)) + return false; + + /* Restrict reload support only to specific IP versions */ + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + /* TOS version read from microcode header */ + ucode_ver = psp->sos.fw_version; + /* Read TOS version from hardware */ + psp_v13_0_init_sos_version(psp); + return (ucode_ver != psp->sos.fw_version); + default: + return false; + } + + return false; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -847,6 +871,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, + .is_reload_needed = psp_v13_0_is_reload_needed, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 
725392522267..7948d74f8722 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -807,9 +807,9 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v2_4_early_init(void *handle) +static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->sdma.num_instances = SDMA_MAX_INSTANCE; @@ -826,11 +826,11 @@ static int sdma_v2_4_early_init(void *handle) return 0; } -static int sdma_v2_4_sw_init(void *handle) +static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -866,9 +866,9 @@ static int sdma_v2_4_sw_init(void *handle) return r; } -static int sdma_v2_4_sw_fini(void *handle) +static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -878,10 +878,10 @@ static int sdma_v2_4_sw_fini(void *handle) return 0; } -static int sdma_v2_4_hw_init(void *handle) +static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v2_4_init_golden_registers(adev); @@ -892,27 +892,21 @@ static int sdma_v2_4_hw_init(void *handle) return r; } -static int sdma_v2_4_hw_fini(void *handle) +static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - sdma_v2_4_enable(adev, false); + sdma_v2_4_enable(ip_block->adev, false); return 0; } -static int sdma_v2_4_suspend(void *handle) +static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_fini(adev); + return sdma_v2_4_hw_fini(ip_block); } -static int sdma_v2_4_resume(void *handle) +static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v2_4_hw_init(adev); + return sdma_v2_4_hw_init(ip_block); } static bool sdma_v2_4_is_idle(void *handle) @@ -927,11 +921,11 @@ static bool sdma_v2_4_is_idle(void *handle) return true; } -static int sdma_v2_4_wait_for_idle(void *handle) +static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -944,10 +938,10 @@ static int sdma_v2_4_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v2_4_soft_reset(void *handle) +static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block) { u32 srbm_soft_reset = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1102,7 +1096,6 @@ static int sdma_v2_4_set_powergating_state(void *handle, static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .name = "sdma_v2_4", 
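/* As throughout this series, every hook in this table now takes a struct amdgpu_ip_block * instead of the old void *handle; each implementation derives the device with
 *
 *	struct amdgpu_device *adev = ip_block->adev;
 *
 * rather than casting the opaque handle, and entries that were only ever NULL (.late_init, .dump_ip_state, .print_ip_state) are simply dropped from the table below.
 */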
.early_init = sdma_v2_4_early_init, - .late_init = NULL, .sw_init = sdma_v2_4_sw_init, .sw_fini = sdma_v2_4_sw_fini, .hw_init = sdma_v2_4_hw_init, @@ -1114,8 +1107,6 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .soft_reset = sdma_v2_4_soft_reset, .set_clockgating_state = sdma_v2_4_set_clockgating_state, .set_powergating_state = sdma_v2_4_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e65194fe94af..9a3d729545a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1080,9 +1080,9 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int sdma_v3_0_early_init(void *handle) +static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; switch (adev->asic_type) { @@ -1106,11 +1106,11 @@ static int sdma_v3_0_early_init(void *handle) return 0; } -static int sdma_v3_0_sw_init(void *handle) +static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, @@ -1152,9 +1152,9 @@ static int sdma_v3_0_sw_init(void *handle) return r; } -static int sdma_v3_0_sw_fini(void *handle) +static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1164,10 +1164,10 @@ static int sdma_v3_0_sw_fini(void *handle) return 0; } -static int sdma_v3_0_hw_init(void *handle) +static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_init_golden_registers(adev); @@ -1178,9 +1178,9 @@ static int sdma_v3_0_hw_init(void *handle) return r; } -static int sdma_v3_0_hw_fini(void *handle) +static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); @@ -1188,18 +1188,14 @@ static int sdma_v3_0_hw_fini(void *handle) return 0; } -static int sdma_v3_0_suspend(void *handle) +static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_fini(adev); + return sdma_v3_0_hw_fini(ip_block); } -static int sdma_v3_0_resume(void *handle) +static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v3_0_hw_init(adev); + return sdma_v3_0_hw_init(ip_block); } static bool sdma_v3_0_is_idle(void *handle) @@ -1214,11 +1210,11 @@ static bool sdma_v3_0_is_idle(void *handle) return true; } -static int sdma_v3_0_wait_for_idle(void *handle) +static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = 
ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1231,9 +1227,9 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool sdma_v3_0_check_soft_reset(void *handle) +static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); @@ -1252,9 +1248,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle) } } -static int sdma_v3_0_pre_soft_reset(void *handle) +static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1271,9 +1267,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle) return 0; } -static int sdma_v3_0_post_soft_reset(void *handle) +static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; if (!adev->sdma.srbm_soft_reset) @@ -1290,9 +1286,9 @@ static int sdma_v3_0_post_soft_reset(void *handle) return 0; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp; @@ -1538,7 +1534,6 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags) static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, - .late_init = NULL, .sw_init = sdma_v3_0_sw_init, .sw_fini = sdma_v3_0_sw_fini, .hw_init = sdma_v3_0_hw_init, @@ -1554,8 +1549,6 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, .get_clockgating_state = sdma_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 23ef4eb36b40..c1f98f6cf20d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1751,9 +1751,9 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_0_early_init(void *handle) +static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_0_init_microcode(adev); @@ -1780,9 +1780,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); -static int sdma_v4_0_late_init(void *handle) +static int sdma_v4_0_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v4_0_setup_ulv(adev); @@ -1792,11 +1792,11 @@ static int sdma_v4_0_late_init(void *handle) return 0; } -static int sdma_v4_0_sw_init(void *handle) +static int sdma_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t *ptr; @@ -1929,9 +1929,9 @@ static int sdma_v4_0_sw_init(void *handle) return r; } -static int sdma_v4_0_sw_fini(void *handle) +static int sdma_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1951,9 +1951,9 @@ static int sdma_v4_0_sw_fini(void *handle) return 0; } -static int sdma_v4_0_hw_init(void *handle) +static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); @@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(void *handle) return sdma_v4_0_start(adev); } -static int sdma_v4_0_hw_fini(void *handle) +static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) @@ -1988,9 +1988,9 @@ static int sdma_v4_0_hw_fini(void *handle) return 0; } -static int sdma_v4_0_suspend(void *handle) +static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU saves SDMA state for us */ if (adev->in_s0ix) { @@ -1998,12 +1998,12 @@ static int sdma_v4_0_suspend(void *handle) return 0; } - return sdma_v4_0_hw_fini(adev); + return sdma_v4_0_hw_fini(ip_block); } -static int sdma_v4_0_resume(void *handle) +static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* SMU restores SDMA state for us */ if (adev->in_s0ix) { @@ -2012,7 +2012,7 @@ static int sdma_v4_0_resume(void *handle) return 0; } - return sdma_v4_0_hw_init(adev); + return sdma_v4_0_hw_init(ip_block); } static bool sdma_v4_0_is_idle(void *handle) @@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle) return true; } -static int sdma_v4_0_wait_for_idle(void *handle) +static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -2049,7 +2049,7 @@ static int sdma_v4_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_0_soft_reset(void *handle) +static int sdma_v4_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -2350,9 +2350,9 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); uint32_t instance_offset; @@ -2371,9 +2371,9 @@ static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_0_dump_ip_state(void *handle) +static void 
sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index c77889040760..a38553f38fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1290,9 +1290,9 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) } } -static int sdma_v4_4_2_early_init(void *handle) +static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v4_4_2_init_microcode(adev); @@ -1318,9 +1318,9 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); #endif -static int sdma_v4_4_2_late_init(void *handle) +static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; #if 0 struct ras_ih_if ih_info = { .cb = sdma_v4_4_2_process_ras_data_cb, @@ -1332,11 +1332,11 @@ static int sdma_v4_4_2_late_init(void *handle) return 0; } -static int sdma_v4_4_2_sw_init(void *handle) +static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 aid_id; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t *ptr; @@ -1430,6 +1430,10 @@ static int sdma_v4_4_2_sw_init(void *handle) } } + /* TODO: Add queue reset mask when FW fully supports it */ + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "fail to initialize sdma ras block\n"); return -EINVAL; @@ -1442,12 +1446,16 @@ static int sdma_v4_4_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v4_4_2_sw_fini(void *handle) +static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -1456,6 +1464,7 @@ static int sdma_v4_4_2_sw_fini(void *handle) amdgpu_ring_fini(&adev->sdma.instance[i].page); } + amdgpu_sdma_sysfs_reset_mask_fini(adev); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) || amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) amdgpu_sdma_destroy_inst_ctx(adev, true); @@ -1467,10 +1476,10 @@ static int sdma_v4_4_2_sw_fini(void *handle) return 0; } -static int sdma_v4_4_2_hw_init(void *handle) +static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); @@ -1482,9 +1491,9 @@ static int sdma_v4_4_2_hw_init(void *handle) return r; } -static int sdma_v4_4_2_hw_fini(void *handle) +static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct 
amdgpu_device *adev = ip_block->adev; uint32_t inst_mask; int i; @@ -1508,21 +1517,19 @@ static int sdma_v4_4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); -static int sdma_v4_4_2_suspend(void *handle) +static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_in_reset(adev)) sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - return sdma_v4_4_2_hw_fini(adev); + return sdma_v4_4_2_hw_fini(ip_block); } -static int sdma_v4_4_2_resume(void *handle) +static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v4_4_2_hw_init(adev); + return sdma_v4_4_2_hw_init(ip_block); } static bool sdma_v4_4_2_is_idle(void *handle) @@ -1540,11 +1547,11 @@ static bool sdma_v4_4_2_is_idle(void *handle) return true; } -static int sdma_v4_4_2_wait_for_idle(void *handle) +static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i, j; u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { for (j = 0; j < adev->sdma.num_instances; j++) { @@ -1559,7 +1566,7 @@ static int sdma_v4_4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v4_4_2_soft_reset(void *handle) +static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -1857,9 +1864,9 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); uint32_t instance_offset; @@ -1878,9 +1885,9 @@ static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v4_4_2_dump_ip_state(void *handle) +static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 3e48ea38385d..fa9b40934957 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -705,14 +705,16 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_0_gfx_resume - setup and start the async dma engines + * sdma_v5_0_gfx_resume_instance - start/restart a certain sdma engine * * @adev: amdgpu_device pointer + * @i: instance + * @restore: whether to restore the saved wptr and rptr on restart * - * Set up the gfx DMA ring buffers and enable them (NAVI10). - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored. + * Returns 0 for success, error for failure. 
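+ *
+ * When @restore is true, the saved ring->wptr is written back to both the
+ * RPTR and WPTR registers so the queue resumes where it was halted instead
+ * of starting again from the beginning of the ring.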
*/ -static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -722,142 +724,163 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); - - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, 
i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), - ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), - ring->gpu_addr >> 40); - + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), + ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), + ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), - lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), - upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, - mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), - doorbell_offset); + if (ring->use_doorbell) { + doorbell = 
REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), + doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, 20); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, 20); - if (amdgpu_sriov_vf(adev)) - sdma_v5_0_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_0_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + } - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 
0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_0_ctx_switch_enable(adev, true); - sdma_v5_0_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_0_ctx_switch_enable(adev, true); + sdma_v5_0_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} + +/** + * sdma_v5_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (NAVI10). + * Returns 0 for success, error for failure. + */ +static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -1366,9 +1389,9 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_0_early_init(void *handle) +static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = sdma_v5_0_init_microcode(adev); @@ -1385,11 +1408,11 @@ static int sdma_v5_0_early_init(void *handle) } -static int sdma_v5_0_sw_init(void *handle) +static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t *ptr; @@ -1429,6 +1452,19 @@ static int sdma_v5_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): + if (adev->sdma.instance[0].fw_version >= 35) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1436,17 +1472,22 @@ static int sdma_v5_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = 
amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_0_sw_fini(void *handle) +static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, false); kfree(adev->sdma.ip_dump); @@ -1454,10 +1495,10 @@ static int sdma_v5_0_sw_fini(void *handle) return 0; } -static int sdma_v5_0_hw_init(void *handle) +static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; sdma_v5_0_init_golden_registers(adev); @@ -1466,9 +1507,9 @@ static int sdma_v5_0_hw_init(void *handle) return r; } -static int sdma_v5_0_hw_fini(void *handle) +static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1479,18 +1520,14 @@ static int sdma_v5_0_hw_fini(void *handle) return 0; } -static int sdma_v5_0_suspend(void *handle) +static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_fini(adev); + return sdma_v5_0_hw_fini(ip_block); } -static int sdma_v5_0_resume(void *handle) +static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_0_hw_init(adev); + return sdma_v5_0_hw_init(ip_block); } static bool sdma_v5_0_is_idle(void *handle) @@ -1508,11 +1545,11 @@ static bool sdma_v5_0_is_idle(void *handle) return true; } -static int sdma_v5_0_wait_for_idle(void *handle) +static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1525,13 +1562,100 @@ static int sdma_v5_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v5_0_soft_reset(void *handle) +static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ return 0; } +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE 
freeze bit to 1 */ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + /* check sdma copy engine all idle if frozen not received*/ + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + /* unfreeze*/ + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_0_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1778,9 +1902,9 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags) *flags |= AMD_CG_SUPPORT_SDMA_LS; } -static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); uint32_t instance_offset; @@ -1799,9 +1923,9 @@ static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_0_dump_ip_state(void *handle) +static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0); @@ -1823,7 +1947,6 @@ static void sdma_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_0_ip_funcs = { .name = "sdma_v5_0", .early_init = sdma_v5_0_early_init, - .late_init = NULL, .sw_init = sdma_v5_0_sw_init, .sw_fini = sdma_v5_0_sw_fini, .hw_init = sdma_v5_0_hw_init, @@ -1874,6 +1997,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait, 
.init_cond_exec = sdma_v5_0_ring_init_cond_exec, .preempt_ib = sdma_v5_0_ring_preempt_ib, + .reset = sdma_v5_0_reset_queue, }; static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index bc9b240a3488..ba5160399ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -522,14 +522,17 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v5_2_gfx_resume - setup and start the async dma engines + * sdma_v5_2_gfx_resume_instance - start/restart a certain sdma engine * * @adev: amdgpu_device pointer + * @i: instance + * @restore: used to restore wptr when restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, we will restore wptr and rptr. + * Return 0 for success. */ -static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) + +static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -539,139 +542,161 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) u32 temp; u32 wptr_poll_cntl; u64 wptr_gpu_addr; - int i, r; - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; + ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, 
mmSDMA0_GFX_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + } - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, - mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, - SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), - wptr_poll_cntl); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); - + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, + mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA0_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + wptr_poll_cntl); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) ring->wptr = 0; - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); - } + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 
upper_32_bits(ring->wptr << 2)); + } - doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); - if (amdgpu_sriov_vf(adev)) - sdma_v5_2_ring_set_wptr(ring); + if (amdgpu_sriov_vf(adev)) + sdma_v5_2_ring_set_wptr(ring); - /* set minor_ptr_update to 0 after wptr programed */ + /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* SRIOV VF has no control of any of registers below */ - if (!amdgpu_sriov_vf(adev)) { - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); - - /* unhalt engine */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); - } + /* SRIOV VF has no control of any of registers below */ 
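
[Editor's note: interrupting the hunk briefly -- the restore branch a few hunks above is the heart of this refactor. The sketch below condenses the two rptr/wptr programming paths into a single hypothetical helper to make the contract explicit; it is an editorial rewrite for clarity, not code from the patch, though every register macro is taken verbatim from the surrounding hunks.]

	/* On restore (per-queue reset), RPTR and WPTR are both seeded from the
	 * saved software wptr, so the ring comes back empty but at its old
	 * position and retired work is not replayed; on a cold start everything
	 * is zeroed. wptr counts dwords, the registers take bytes, hence << 2. */
	static void sdma_v5_2_prog_ring_ptrs(struct amdgpu_device *adev, int i,
					     struct amdgpu_ring *ring, bool restore)
	{
		u64 off = restore ? (ring->wptr << 2) : 0;

		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR),
				lower_32_bits(off));
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI),
				upper_32_bits(off));
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
				lower_32_bits(off));
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
				upper_32_bits(off));
		if (!restore)
			ring->wptr = 0;
	}
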
+ if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + + /* unhalt engine */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v5_2_ctx_switch_enable(adev, true); - sdma_v5_2_enable(adev, true); - } + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } + + return amdgpu_ring_test_helper(ring); +} - r = amdgpu_ring_test_helper(ring); +/** + * sdma_v5_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v5_2_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -736,9 +761,9 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v5_2_soft_reset(void *handle) +static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset; u32 tmp; int i; @@ -778,6 +803,7 @@ static int sdma_v5_2_soft_reset(void *handle) static int sdma_v5_2_start(struct amdgpu_device *adev) { int r = 0; + struct amdgpu_ip_block *ip_block; if (amdgpu_sriov_vf(adev)) { sdma_v5_2_ctx_switch_enable(adev, false); @@ -798,7 +824,11 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) msleep(1000); } - sdma_v5_2_soft_reset(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA); + if (!ip_block) + return -EINVAL; + + sdma_v5_2_soft_reset(ip_block); /* unhalt the MEs */ sdma_v5_2_enable(adev, true); /* enable sdma ring preemption */ @@ -1180,7 +1210,28 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; + uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); + + /* Update the PD address for this VMID. */ + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), + lower_32_bits(pd_addr)); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), + upper_32_bits(pd_addr)); + + /* Trigger invalidation. 
*/ + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) | + SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) | + SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f)); + amdgpu_ring_write(ring, req); + amdgpu_ring_write(ring, 0xFFFFFFFF); + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) | + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F)); } static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring, @@ -1216,9 +1267,9 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v5_2_early_init(void *handle) +static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1268,11 +1319,11 @@ static unsigned sdma_v5_2_seq_to_trap_id(int seq_num) return -EINVAL; } -static int sdma_v5_2_sw_init(void *handle) +static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t *ptr; @@ -1306,6 +1357,24 @@ static int sdma_v5_2_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 4): + if (adev->sdma.instance[0].fw_version >= 76) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + case IP_VERSION(5, 2, 5): + if (adev->sdma.instance[0].fw_version >= 34) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + /* Allocate memory for SDMA IP Dump buffer */ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); if (ptr) @@ -1313,17 +1382,22 @@ static int sdma_v5_2_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v5_2_sw_fini(void *handle) +static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1331,16 +1405,16 @@ static int sdma_v5_2_sw_fini(void *handle) return 0; } -static int sdma_v5_2_hw_init(void *handle) +static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v5_2_start(adev); } -static int sdma_v5_2_hw_fini(void *handle) +static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1351,18 +1425,14 @@ static int sdma_v5_2_hw_fini(void *handle) return 0; } -static int sdma_v5_2_suspend(void *handle) +static 
int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_fini(adev); + return sdma_v5_2_hw_fini(ip_block); } -static int sdma_v5_2_resume(void *handle) +static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v5_2_hw_init(adev); + return sdma_v5_2_hw_init(ip_block); } static bool sdma_v5_2_is_idle(void *handle) @@ -1380,11 +1450,11 @@ static bool sdma_v5_2_is_idle(void *handle) return true; } -static int sdma_v5_2_wait_for_idle(void *handle) +static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1, sdma2, sdma3; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); @@ -1399,6 +1469,96 @@ static int sdma_v5_2_wait_for_idle(void *handle) return -ETIMEDOUT; } +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, j, r; + u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + + /* stop queue */ + ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + + rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + + /*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + for (j = 0; j < adev->usec_timeout; j++) { + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + + if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1) + break; + udelay(1); + } + + + if (j == adev->usec_timeout) { + stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG)); + if ((stat1_reg & 0x3FF) != 0x3FF) { + DRM_ERROR("cannot soft reset as sdma not idle\n"); + r = -ETIMEDOUT; + goto err0; + } + } + + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + + cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); + + /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ + preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); + preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); + + soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + 
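	/*
	 * [Editorial aside, not part of the patch:] the double shift in the
	 * next statement evaluates as 1 << (SOFT_RESET_SDMA0__SHIFT + i),
	 * i.e. BIT(shift + i); it relies on the GRBM SOFT_RESET_SDMA0/SDMA1
	 * fields occupying adjacent bits, so instance i selects its own reset
	 * bit. The v5.0 reset path earlier in this series uses the same idiom.
	 */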
soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; + + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + udelay(50); + + soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); + + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); + + /* unfreeze and unhalt */ + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + + r = sdma_v5_2_gfx_resume_instance(adev, i, true); + +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} + static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring) { int i, r = 0; @@ -1736,9 +1896,9 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring) amdgpu_gfx_off_ctrl(adev, true); } -static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); uint32_t instance_offset; @@ -1757,9 +1917,9 @@ static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v5_2_dump_ip_state(void *handle) +static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2); @@ -1781,7 +1941,6 @@ static void sdma_v5_2_dump_ip_state(void *handle) static const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .name = "sdma_v5_2", .early_init = sdma_v5_2_early_init, - .late_init = NULL, .sw_init = sdma_v5_2_sw_init, .sw_fini = sdma_v5_2_sw_fini, .hw_init = sdma_v5_2_hw_init, @@ -1834,6 +1993,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v5_2_ring_init_cond_exec, .preempt_ib = sdma_v5_2_ring_preempt_ib, + .reset = sdma_v5_2_reset_queue, }; static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 208a1fa9d4e7..d46128b0ec92 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable) } /** - * sdma_v6_0_gfx_resume - setup and start the async dma engines + * sdma_v6_0_gfx_resume_instance - start/restart a certain sdma engine * * @adev: amdgpu_device pointer + * @i: instance + * @restore: used to restore wptr when restart * - * Set up the gfx DMA ring buffers and enable them. - * Returns 0 for success, error for failure. + * Set up the gfx DMA ring buffers and enable them. On restart, we will restore wptr and rptr. + * Return 0 for success. 
*/ -static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore) { struct amdgpu_ring *ring; u32 rb_cntl, ib_cntl; @@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) u32 doorbell_offset; u32 temp; u64 wptr_gpu_addr; - int i, r; - - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - if (!amdgpu_sriov_vf(adev)) - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + ring = &adev->sdma.instance[i].ring; + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); - /* Set ring buffer size in dwords */ - rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, - RPTR_WRITEBACK_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + if (restore) { + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); + } else { WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0); + } + /* setup the wptr shadow polling */ + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), + 
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + + if (!restore) + ring->wptr = 0; - /* setup the wptr shadow polling */ - wptr_gpu_addr = ring->wptr_gpu_addr; - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), - upper_32_bits(wptr_gpu_addr)); - - /* set the wb address whether it's enabled or not */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), - upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), - lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); - - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + } - ring->wptr = 0; + doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); - /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); + } + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); - } + if (i == 0) + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range * 
adev->sdma.num_instances); - doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); - doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); + + /* Set up sdma hang watchdog */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); + /* 100ms per unit */ + temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, + max(adev->usec_timeout/100000, 1)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp); - if (ring->use_doorbell) { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, - OFFSET, ring->doorbell_index); - } else { - doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); - } - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); - - if (i == 0) - adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); - - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_ring_set_wptr(ring); - - /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); - - /* Set up sdma hang watchdog */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); - /* 100ms per unit */ - temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, - max(adev->usec_timeout/100000, 1)); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, 
regSDMA0_UTCL1_PAGE), temp); - - if (!amdgpu_sriov_vf(adev)) { - /* unhalt engine */ - temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); - } + if (!amdgpu_sriov_vf(adev)) { + /* unhalt engine */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); + } - /* enable DMA RB */ - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); - ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); #endif - /* enable DMA IBs */ - WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + /* enable DMA IBs */ + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); + + if (amdgpu_sriov_vf(adev)) + sdma_v6_0_enable(adev, true); + + return amdgpu_ring_test_helper(ring); +} - if (amdgpu_sriov_vf(adev)) - sdma_v6_0_enable(adev, true); +/** + * sdma_v6_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) +{ + int i, r; - r = amdgpu_ring_test_helper(ring); + for (i = 0; i < adev->sdma.num_instances; i++) { + r = sdma_v6_0_gfx_resume_instance(adev, i, false); if (r) return r; } @@ -733,9 +755,9 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v6_0_soft_reset(void *handle) +static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -769,9 +791,9 @@ static int sdma_v6_0_soft_reset(void *handle) return sdma_v6_0_start(adev); } -static bool sdma_v6_0_check_soft_reset(void *handle) +static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1272,9 +1294,9 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) } } -static int sdma_v6_0_early_init(void *handle) +static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1291,11 +1313,11 @@ static int sdma_v6_0_early_init(void *handle) return 0; } -static int sdma_v6_0_sw_init(void *handle) +static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t *ptr; @@ -1328,6 +1350,19 @@ static int sdma_v6_0_sw_init(void *handle) return r; } + adev->sdma.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 2): + case IP_VERSION(6, 0, 3): + if (adev->sdma.instance[0].fw_version >= 21) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; + default: + break; + } + if (amdgpu_sdma_ras_sw_init(adev)) { dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); return -EINVAL; @@ -1340,17 +1375,22 @@ static int sdma_v6_0_sw_init(void *handle) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + r = amdgpu_sdma_sysfs_reset_mask_init(adev); + if (r) + return r; + return r; } -static int sdma_v6_0_sw_fini(void *handle) +static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + amdgpu_sdma_sysfs_reset_mask_fini(adev); amdgpu_sdma_destroy_inst_ctx(adev, true); kfree(adev->sdma.ip_dump); @@ -1358,16 +1398,16 @@ static int sdma_v6_0_sw_fini(void *handle) return 0; } -static int sdma_v6_0_hw_init(void *handle) +static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v6_0_start(adev); } -static int sdma_v6_0_hw_fini(void *handle) +static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if 
(amdgpu_sriov_vf(adev)) return 0; @@ -1378,18 +1418,14 @@ static int sdma_v6_0_hw_fini(void *handle) return 0; } -static int sdma_v6_0_suspend(void *handle) +static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_fini(adev); + return sdma_v6_0_hw_fini(ip_block); } -static int sdma_v6_0_resume(void *handle) +static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v6_0_hw_init(adev); + return sdma_v6_0_hw_init(ip_block); } static bool sdma_v6_0_is_idle(void *handle) @@ -1407,11 +1443,11 @@ static bool sdma_v6_0_is_idle(void *handle) return true; } -static int sdma_v6_0_wait_for_idle(void *handle) +static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1469,6 +1505,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +{ + struct amdgpu_device *adev = ring->adev; + int i, r; + + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (ring == &adev->sdma.instance[i].ring) + break; + } + + if (i == adev->sdma.num_instances) { + DRM_ERROR("sdma instance not found\n"); + return -EINVAL; + } + + r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); + if (r) + return r; + + return sdma_v6_0_gfx_resume_instance(adev, i, true); +} + static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1556,9 +1617,9 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); uint32_t instance_offset; @@ -1577,9 +1638,9 @@ static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v6_0_dump_ip_state(void *handle) +static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0); @@ -1601,7 +1662,6 @@ static void sdma_v6_0_dump_ip_state(void *handle) const struct amd_ip_funcs sdma_v6_0_ip_funcs = { .name = "sdma_v6_0", .early_init = sdma_v6_0_early_init, - .late_init = NULL, .sw_init = sdma_v6_0_sw_init, .sw_fini = sdma_v6_0_sw_fini, .hw_init = sdma_v6_0_hw_init, @@ -1652,6 +1712,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v6_0_ring_init_cond_exec, .preempt_ib = sdma_v6_0_ring_preempt_ib, + .reset = sdma_v6_0_reset_queue, }; static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1726,7 +1787,7 @@ static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = 
SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 9288f37a3cc5..d2ce6b6a7ff6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -753,9 +753,9 @@ static int sdma_v7_0_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v7_0_soft_reset(void *handle) +static int sdma_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 tmp; int i; @@ -789,9 +789,9 @@ static int sdma_v7_0_soft_reset(void *handle) return sdma_v7_0_start(adev); } -static bool sdma_v7_0_check_soft_reset(void *handle) +static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; long tmo = msecs_to_jiffies(1000); @@ -1259,9 +1259,9 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } -static int sdma_v7_0_early_init(void *handle) +static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_sdma_init_microcode(adev, 0, true); @@ -1279,11 +1279,11 @@ static int sdma_v7_0_early_init(void *handle) return 0; } -static int sdma_v7_0_sw_init(void *handle) +static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t *ptr; @@ -1326,9 +1326,9 @@ static int sdma_v7_0_sw_init(void *handle) return r; } -static int sdma_v7_0_sw_fini(void *handle) +static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -1344,16 +1344,16 @@ static int sdma_v7_0_sw_fini(void *handle) return 0; } -static int sdma_v7_0_hw_init(void *handle) +static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return sdma_v7_0_start(adev); } -static int sdma_v7_0_hw_fini(void *handle) +static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) return 0; @@ -1364,18 +1364,14 @@ static int sdma_v7_0_hw_fini(void *handle) return 0; } -static int sdma_v7_0_suspend(void *handle) +static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return sdma_v7_0_hw_fini(adev); + return sdma_v7_0_hw_fini(ip_block); } -static int sdma_v7_0_resume(void *handle) +static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return 
sdma_v7_0_hw_init(adev); + return sdma_v7_0_hw_init(ip_block); } static bool sdma_v7_0_is_idle(void *handle) @@ -1393,11 +1389,11 @@ static bool sdma_v7_0_is_idle(void *handle) return true; } -static int sdma_v7_0_wait_for_idle(void *handle) +static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 sdma0, sdma1; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); @@ -1544,9 +1540,9 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags) { } -static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) +static void sdma_v7_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); uint32_t instance_offset; @@ -1565,9 +1561,9 @@ static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void sdma_v7_0_dump_ip_state(void *handle) +static void sdma_v7_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t instance_offset; uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 85235470e872..00f63d3fbea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2022,9 +2022,9 @@ static uint32_t si_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } -static int si_common_early_init(void *handle) +static int si_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->smc_rreg = &si_smc_rreg; adev->smc_wreg = &si_smc_wreg; @@ -2148,17 +2148,6 @@ static int si_common_early_init(void *handle) return 0; } -static int si_common_sw_init(void *handle) -{ - return 0; -} - -static int si_common_sw_fini(void *handle) -{ - return 0; -} - - static void si_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { @@ -2633,9 +2622,9 @@ static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev) pcie_set_readrq(adev->pdev, 512); } -static int si_common_hw_init(void *handle) +static int si_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_fix_pci_max_read_req_size(adev); si_init_golden_registers(adev); @@ -2645,23 +2634,14 @@ static int si_common_hw_init(void *handle) return 0; } -static int si_common_hw_fini(void *handle) +static int si_common_hw_fini(struct amdgpu_ip_block *ip_block) { return 0; } -static int si_common_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_fini(adev); -} - -static int si_common_resume(void *handle) +static int si_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_common_hw_init(adev); + return si_common_hw_init(ip_block); } static bool si_common_is_idle(void *handle) @@ -2669,16 +2649,6 @@ static bool si_common_is_idle(void *handle) return true; } -static int si_common_wait_for_idle(void 
*handle) -{ - return 0; -} - -static int si_common_soft_reset(void *handle) -{ - return 0; -} - static int si_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -2694,20 +2664,12 @@ static int si_common_set_powergating_state(void *handle, static const struct amd_ip_funcs si_common_ip_funcs = { .name = "si_common", .early_init = si_common_early_init, - .late_init = NULL, - .sw_init = si_common_sw_init, - .sw_fini = si_common_sw_fini, .hw_init = si_common_hw_init, .hw_fini = si_common_hw_fini, - .suspend = si_common_suspend, .resume = si_common_resume, .is_idle = si_common_is_idle, - .wait_for_idle = si_common_wait_for_idle, - .soft_reset = si_common_soft_reset, .set_clockgating_state = si_common_set_clockgating_state, .set_powergating_state = si_common_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version si_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 11db5b755832..47647a6083e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -457,9 +457,9 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static int si_dma_early_init(void *handle) +static int si_dma_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->sdma.num_instances = 2; @@ -471,11 +471,11 @@ static int si_dma_early_init(void *handle) return 0; } -static int si_dma_sw_init(void *handle) +static int si_dma_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* DMA0 trap event */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, @@ -506,9 +506,9 @@ static int si_dma_sw_init(void *handle) return r; } -static int si_dma_sw_fini(void *handle) +static int si_dma_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; for (i = 0; i < adev->sdma.num_instances; i++) @@ -517,39 +517,34 @@ static int si_dma_sw_fini(void *handle) return 0; } -static int si_dma_hw_init(void *handle) +static int si_dma_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_dma_start(adev); } -static int si_dma_hw_fini(void *handle) +static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_dma_stop(adev); + si_dma_stop(ip_block->adev); return 0; } -static int si_dma_suspend(void *handle) +static int si_dma_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_fini(adev); + return si_dma_hw_fini(ip_block); } -static int si_dma_resume(void *handle) +static int si_dma_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_dma_hw_init(adev); + return si_dma_hw_init(ip_block); } static bool si_dma_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS2); if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) @@ -558,20 +553,20 @@ static bool si_dma_is_idle(void *handle) return true; } -static int 
si_dma_wait_for_idle(void *handle) +static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_dma_is_idle(handle)) + if (si_dma_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_dma_soft_reset(void *handle) +static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block) { DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); return 0; @@ -696,7 +691,6 @@ static int si_dma_set_powergating_state(void *handle, static const struct amd_ip_funcs si_dma_ip_funcs = { .name = "si_dma", .early_init = si_dma_early_init, - .late_init = NULL, .sw_init = si_dma_sw_init, .sw_fini = si_dma_sw_fini, .hw_init = si_dma_hw_init, @@ -708,8 +702,6 @@ static const struct amd_ip_funcs si_dma_ip_funcs = { .soft_reset = si_dma_soft_reset, .set_clockgating_state = si_dma_set_clockgating_state, .set_powergating_state = si_dma_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs si_dma_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 5237395e4fab..2ec1ebe4db11 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -156,19 +156,19 @@ static void si_ih_set_rptr(struct amdgpu_device *adev, WREG32(IH_RB_RPTR, ih->rptr); } -static int si_ih_early_init(void *handle) +static int si_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; si_ih_set_interrupt_funcs(adev); return 0; } -static int si_ih_sw_init(void *handle) +static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) @@ -177,43 +177,37 @@ static int si_ih_sw_init(void *handle) return amdgpu_irq_init(adev); } -static int si_ih_sw_fini(void *handle) +static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int si_ih_hw_init(void *handle) +static int si_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return si_ih_irq_init(adev); } -static int si_ih_hw_fini(void *handle) +static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_ih_irq_disable(adev); + si_ih_irq_disable(ip_block->adev); return 0; } -static int si_ih_suspend(void *handle) +static int si_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_fini(adev); + return si_ih_hw_fini(ip_block); } -static int si_ih_resume(void *handle) +static int si_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return si_ih_hw_init(adev); + return si_ih_hw_init(ip_block); } static bool si_ih_is_idle(void *handle) @@ -227,22 +221,22 @@ static bool si_ih_is_idle(void *handle) return true; } -static int si_ih_wait_for_idle(void *handle) +static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { 
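	/*
	 * [Editorial aside, not part of the patch:] is_idle keeps its
	 * void *handle signature in this series, so the converted
	 * wait_for_idle hooks in si_dma and si_ih now pass adev directly --
	 * which the old signature still accepts -- instead of forwarding a
	 * handle argument that no longer exists.
	 */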
unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (si_ih_is_idle(handle)) + if (si_ih_is_idle(adev)) return 0; udelay(1); } return -ETIMEDOUT; } -static int si_ih_soft_reset(void *handle) +static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(SRBM_STATUS); @@ -284,7 +278,6 @@ static int si_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs si_ih_ip_funcs = { .name = "si_ih", .early_init = si_ih_early_init, - .late_init = NULL, .sw_init = si_ih_sw_init, .sw_fini = si_ih_sw_fini, .hw_init = si_ih_hw_init, @@ -296,8 +289,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { .soft_reset = si_ih_soft_reset, .set_clockgating_state = si_ih_set_clockgating_state, .set_powergating_state = si_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs si_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 481217c32d85..9b01e074af47 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -81,15 +81,9 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -175,15 +169,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } } @@ -193,15 +181,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -213,7 +195,7 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c index 0af648931df5..e70ebad3f9fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -80,15 +80,9 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_MES)) continue; - r = adev->ip_blocks[i].version->funcs->suspend(adev); - - if (r) { - 
dev_err(adev->dev, - "suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); + if (r) return r; - } - adev->ip_blocks[i].status.hw = false; } return 0; @@ -186,15 +180,9 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - dev_err(adev->dev, - "resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } - - adev->ip_blocks[i].status.hw = true; } for (i = 0; i < adev->num_ip_blocks; i++) { @@ -208,7 +196,7 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init( - (void *)adev); + &adev->ip_blocks[i]); if (r) { dev_err(adev->dev, "late_init of IP block <%s> failed %d after reset\n", diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 307185c0e1b8..ede072758dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -578,20 +578,16 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) { - u32 sol_reg; - - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - /* Will reset for the following suspend abort cases. - * 1) Only reset limit on APU side, dGPU hasn't checked yet. - * 2) S3 suspend abort and TOS already launched. + * 1) Only reset on APU side, dGPU hasn't checked yet. + * 2) S3 suspend aborted in the normal S3 suspend or + * performing pm core test. */ if (adev->flags & AMD_IS_APU && adev->in_s3 && - !adev->suspend_complete && - sol_reg) + !pm_resume_via_firmware()) return true; - - return false; + else + return false; } static int soc15_asic_reset(struct amdgpu_device *adev) @@ -601,11 +597,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev) * successfully. So now, temporarily enable it for the * S3 suspend abort case. */ - if (((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) && - !soc15_need_reset_on_resume(adev)) + + if ((adev->apu_flags & AMD_APU_IS_PICASSO || + !(adev->apu_flags & AMD_APU_IS_RAVEN)) && + soc15_need_reset_on_resume(adev)) + goto asic_reset; + + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; +asic_reset: switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_PCI: dev_info(adev->dev, "PCI reset\n"); @@ -829,6 +831,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RENOIR) return true; + if (amdgpu_gmc_need_reset_on_init(adev)) + return true; + if (amdgpu_psp_tos_reload_needed(adev)) + return true; /* Just return false for soc15 GPUs. Reset does not seem to * be necessary. 
*/ @@ -929,9 +935,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .get_reg_state = &aqua_vanjaram_get_reg_state, }; -static int soc15_common_early_init(void *handle) +static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -1198,9 +1204,9 @@ static int soc15_common_early_init(void *handle) return 0; } -static int soc15_common_late_init(void *handle) +static int soc15_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); @@ -1213,9 +1219,9 @@ static int soc15_common_late_init(void *handle) return 0; } -static int soc15_common_sw_init(void *handle) +static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); @@ -1227,9 +1233,9 @@ static int soc15_common_sw_init(void *handle) return 0; } -static int soc15_common_sw_fini(void *handle) +static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->df.funcs && adev->df.funcs->sw_fini) @@ -1251,9 +1257,9 @@ static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) } } -static int soc15_common_hw_init(void *handle) +static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc15_program_aspm(adev); @@ -1280,9 +1286,9 @@ static int soc15_common_hw_init(void *handle) return 0; } -static int soc15_common_hw_fini(void *handle) +static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc15_enable_doorbell_aperture @@ -1295,7 +1301,12 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + /* + * For minimal init, late_init is not called, hence RAS irqs are not + * enabled. 
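The mode2-reset paths in the sienna_cichlid and smu_v13_0_10 hunks above (and aldebaran earlier) stop open-coding per-block suspend/resume and call amdgpu_ip_block_suspend()/amdgpu_ip_block_resume() instead. Judging from the deleted lines, the helpers centralize the dev_err() reporting and the status.hw bookkeeping that every caller previously duplicated. A hedged sketch of what such a helper plausibly does, inferred from the removed code rather than copied from amdgpu's actual implementation:

    int example_ip_block_suspend(struct amdgpu_ip_block *ip_block)
    {
            /* suspend callbacks now take the ip_block, per the conversion above */
            int r = ip_block->version->funcs->suspend(ip_block);

            if (r) {
                    dev_err(ip_block->adev->dev,
                            "suspend of IP block <%s> failed %d\n",
                            ip_block->version->funcs->name, r);
                    return r;
            }
            ip_block->status.hw = false;    /* the block is now down */
            return 0;
    }

The same hunks also fix the late_init invocations to pass &adev->ip_blocks[i] rather than a casted (void *)adev, matching the new callback signature.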
+ */ if ((!amdgpu_sriov_vf(adev)) && + (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (adev->nbio.ras && @@ -1309,22 +1320,20 @@ static int soc15_common_hw_fini(void *handle) return 0; } -static int soc15_common_suspend(void *handle) +static int soc15_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc15_common_hw_fini(adev); + return soc15_common_hw_fini(ip_block); } -static int soc15_common_resume(void *handle) +static int soc15_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc15_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); soc15_asic_reset(adev); } - return soc15_common_hw_init(adev); + return soc15_common_hw_init(ip_block); } static bool soc15_common_is_idle(void *handle) @@ -1332,16 +1341,6 @@ static bool soc15_common_is_idle(void *handle) return true; } -static int soc15_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc15_common_soft_reset(void *handle) -{ - return 0; -} - static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t def, data; @@ -1492,11 +1491,7 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = { .suspend = soc15_common_suspend, .resume = soc15_common_resume, .is_idle = soc15_common_is_idle, - .wait_for_idle = soc15_common_wait_for_idle, - .soft_reset = soc15_common_soft_reset, .set_clockgating_state = soc15_common_set_clockgating_state, .set_powergating_state = soc15_common_set_powergating_state, .get_clockgating_state= soc15_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index bba35880badb..d6999835918f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -556,9 +556,9 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = { .update_umd_stable_pstate = &soc21_update_umd_stable_pstate, }; -static int soc21_common_early_init(void *handle) +static int soc21_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -794,9 +794,9 @@ static int soc21_common_early_init(void *handle) return 0; } -static int soc21_common_late_init(void *handle) +static int soc21_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); @@ -832,9 +832,9 @@ static int soc21_common_late_init(void *handle) return 0; } -static int soc21_common_sw_init(void *handle) +static int soc21_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -842,14 +842,9 @@ static int soc21_common_sw_init(void *handle) return 0; } -static int soc21_common_sw_fini(void *handle) -{ - return 0; -} - -static int soc21_common_hw_init(void *handle) +static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc21_program_aspm(adev); @@ -867,9 +862,9 @@ static int soc21_common_hw_init(void *handle) return 0; } -static int soc21_common_hw_fini(void *handle) +static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -890,11 +885,9 @@ static int soc21_common_hw_fini(void *handle) return 0; } -static int soc21_common_suspend(void *handle) +static int soc21_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc21_common_hw_fini(adev); + return soc21_common_hw_fini(ip_block); } static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) @@ -904,9 +897,10 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) /* Will reset for the following suspend abort cases. * 1) Only reset dGPU side. * 2) S3 suspend got aborted and TOS is active. + * As for dGPU suspend abort cases the SOL value + * will be kept as zero at this resume point. */ - if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && - !adev->suspend_complete) { + if (!(adev->flags & AMD_IS_APU) && adev->in_s3) { sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); msleep(100); sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); @@ -917,16 +911,16 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) return false; } -static int soc21_common_resume(void *handle) +static int soc21_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (soc21_need_reset_on_resume(adev)) { dev_info(adev->dev, "S3 suspend aborted, resetting..."); soc21_asic_reset(adev); } - return soc21_common_hw_init(adev); + return soc21_common_hw_init(ip_block); } static bool soc21_common_is_idle(void *handle) @@ -934,16 +928,6 @@ static bool soc21_common_is_idle(void *handle) return true; } -static int soc21_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc21_common_soft_reset(void *handle) -{ - return 0; -} - static int soc21_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1002,17 +986,12 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = { .early_init = soc21_common_early_init, .late_init = soc21_common_late_init, .sw_init = soc21_common_sw_init, - .sw_fini = soc21_common_sw_fini, .hw_init = soc21_common_hw_init, .hw_fini = soc21_common_hw_fini, .suspend = soc21_common_suspend, .resume = soc21_common_resume, .is_idle = soc21_common_is_idle, - .wait_for_idle = soc21_common_wait_for_idle, - .soft_reset = soc21_common_soft_reset, .set_clockgating_state = soc21_common_set_clockgating_state, .set_powergating_state = soc21_common_set_powergating_state, .get_clockgating_state = soc21_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index 29a848f2466b..be96de92b2f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -363,9 +363,9 @@ static const struct amdgpu_asic_funcs soc24_asic_funcs = { .update_umd_stable_pstate = &soc24_update_umd_stable_pstate, }; -static int soc24_common_early_init(void *handle) +static int 
soc24_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->nbio.funcs->set_reg_remap(adev); adev->smc_rreg = NULL; @@ -440,9 +440,9 @@ static int soc24_common_early_init(void *handle) return 0; } -static int soc24_common_late_init(void *handle) +static int soc24_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev); @@ -455,9 +455,9 @@ static int soc24_common_late_init(void *handle) return 0; } -static int soc24_common_sw_init(void *handle) +static int soc24_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_add_irq_id(adev); @@ -465,14 +465,9 @@ static int soc24_common_sw_init(void *handle) return 0; } -static int soc24_common_sw_fini(void *handle) +static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int soc24_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable aspm */ soc24_program_aspm(adev); @@ -494,9 +489,9 @@ static int soc24_common_hw_init(void *handle) return 0; } -static int soc24_common_hw_fini(void *handle) +static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc21_enable_doorbell_aperture @@ -512,18 +507,14 @@ static int soc24_common_hw_fini(void *handle) return 0; } -static int soc24_common_suspend(void *handle) +static int soc24_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_fini(adev); + return soc24_common_hw_fini(ip_block); } -static int soc24_common_resume(void *handle) +static int soc24_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return soc24_common_hw_init(adev); + return soc24_common_hw_init(ip_block); } static bool soc24_common_is_idle(void *handle) @@ -531,16 +522,6 @@ static bool soc24_common_is_idle(void *handle) return true; } -static int soc24_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int soc24_common_soft_reset(void *handle) -{ - return 0; -} - static int soc24_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -595,14 +576,11 @@ static const struct amd_ip_funcs soc24_common_ip_funcs = { .early_init = soc24_common_early_init, .late_init = soc24_common_late_init, .sw_init = soc24_common_sw_init, - .sw_fini = soc24_common_sw_fini, .hw_init = soc24_common_hw_init, .hw_fini = soc24_common_hw_fini, .suspend = soc24_common_suspend, .resume = soc24_common_resume, .is_idle = soc24_common_is_idle, - .wait_for_idle = soc24_common_wait_for_idle, - .soft_reset = soc24_common_soft_reset, .set_clockgating_state = soc24_common_set_clockgating_state, .set_powergating_state = soc24_common_set_powergating_state, .get_clockgating_state = soc24_common_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 
3ac56a9645eb..21b71a427b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -113,6 +113,14 @@ enum ta_ras_address_type { TA_RAS_PA_TO_MCA, }; +enum ta_ras_nps_mode { + TA_RAS_UNKNOWN_MODE = 0, + TA_RAS_NPS1_MODE = 1, + TA_RAS_NPS2_MODE = 2, + TA_RAS_NPS4_MODE = 4, + TA_RAS_NPS8_MODE = 8, +}; + /* Input/output structures for RAS commands */ /**********************************************************/ @@ -139,6 +147,7 @@ struct ta_ras_init_flags { uint8_t dgpu_mode; uint16_t xcc_mask; uint8_t channel_dis_num; + uint8_t nps_mode; }; struct ta_ras_mca_addr { diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 24d49d813607..5a04a6770138 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -283,9 +283,9 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev, } } -static int tonga_ih_early_init(void *handle) +static int tonga_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = amdgpu_irq_add_domain(adev); @@ -297,10 +297,10 @@ static int tonga_ih_early_init(void *handle) return 0; } -static int tonga_ih_sw_init(void *handle) +static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true); if (r) @@ -314,9 +314,9 @@ static int tonga_ih_sw_init(void *handle) return r; } -static int tonga_ih_sw_fini(void *handle) +static int tonga_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); @@ -324,10 +324,10 @@ static int tonga_ih_sw_fini(void *handle) return 0; } -static int tonga_ih_hw_init(void *handle) +static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = tonga_ih_irq_init(adev); if (r) @@ -336,27 +336,21 @@ static int tonga_ih_hw_init(void *handle) return 0; } -static int tonga_ih_hw_fini(void *handle) +static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - tonga_ih_irq_disable(adev); + tonga_ih_irq_disable(ip_block->adev); return 0; } -static int tonga_ih_suspend(void *handle) +static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int tonga_ih_resume(void *handle) +static int tonga_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } static bool tonga_ih_is_idle(void *handle) @@ -370,11 +364,11 @@ static bool tonga_ih_is_idle(void *handle) return true; } -static int tonga_ih_wait_for_idle(void *handle) +static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -386,9 +380,9 @@ static int 
tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static bool tonga_ih_check_soft_reset(void *handle) +static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -405,29 +399,27 @@ static bool tonga_ih_check_soft_reset(void *handle) } } -static int tonga_ih_pre_soft_reset(void *handle) +static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->irq.srbm_soft_reset) + if (!ip_block->adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_fini(adev); + return tonga_ih_hw_fini(ip_block); } -static int tonga_ih_post_soft_reset(void *handle) +static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->irq.srbm_soft_reset) return 0; - return tonga_ih_hw_init(adev); + return tonga_ih_hw_init(ip_block); } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->irq.srbm_soft_reset) @@ -471,7 +463,6 @@ static int tonga_ih_set_powergating_state(void *handle, static const struct amd_ip_funcs tonga_ih_ip_funcs = { .name = "tonga_ih", .early_init = tonga_ih_early_init, - .late_init = NULL, .sw_init = tonga_ih_sw_init, .sw_fini = tonga_ih_sw_fini, .hw_init = tonga_ih_hw_init, @@ -486,8 +477,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs tonga_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 805d6662c88b..bdbca25d80c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -531,9 +531,9 @@ static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev) } -static int uvd_v3_1_early_init(void *handle) +static int uvd_v3_1_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v3_1_set_ring_funcs(adev); @@ -542,10 +542,10 @@ static int uvd_v3_1_early_init(void *handle) return 0; } -static int uvd_v3_1_sw_init(void *handle) +static int uvd_v3_1_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; void *ptr; uint32_t ucode_len; @@ -580,10 +580,10 @@ static int uvd_v3_1_sw_init(void *handle) return r; } -static int uvd_v3_1_sw_fini(void *handle) +static int uvd_v3_1_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -621,13 +621,13 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v3_1_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the 
amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v3_1_hw_init(void *handle) +static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -688,13 +688,13 @@ done: /** * uvd_v3_1_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v3_1_hw_fini(void *handle) +static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -704,17 +704,17 @@ static int uvd_v3_1_hw_fini(void *handle) return 0; } -static int uvd_v3_1_prepare_suspend(void *handle) +static int uvd_v3_1_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v3_1_suspend(void *handle) +static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -740,23 +740,22 @@ static int uvd_v3_1_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v3_1_hw_fini(adev); + r = uvd_v3_1_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v3_1_resume(void *handle) +static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v3_1_hw_init(adev); + return uvd_v3_1_hw_init(ip_block); } static bool uvd_v3_1_is_idle(void *handle) @@ -766,10 +765,10 @@ static bool uvd_v3_1_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v3_1_wait_for_idle(void *handle) +static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -778,9 +777,9 @@ static int uvd_v3_1_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v3_1_soft_reset(void *handle) +static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v3_1_stop(adev); @@ -806,7 +805,6 @@ static int uvd_v3_1_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .name = "uvd_v3_1", .early_init = uvd_v3_1_early_init, - .late_init = NULL, .sw_init = uvd_v3_1_sw_init, .sw_fini = uvd_v3_1_sw_fini, .hw_init = uvd_v3_1_hw_init, @@ -819,8 +817,6 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .soft_reset = uvd_v3_1_soft_reset, .set_clockgating_state = uvd_v3_1_set_clockgating_state, .set_powergating_state = uvd_v3_1_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version 
uvd_v3_1_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 3f19c606f4de..a836dc9cfcad 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v4_2_early_init(void *handle) +static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); @@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle) return 0; } -static int uvd_v4_2_sw_init(void *handle) +static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -130,10 +130,10 @@ static int uvd_v4_2_sw_init(void *handle) return r; } -static int uvd_v4_2_sw_fini(void *handle) +static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -147,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, /** * uvd_v4_2_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v4_2_hw_init(void *handle) +static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -202,13 +202,13 @@ done: /** * uvd_v4_2_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
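Another cleanup repeated in every amd_ip_funcs table in this series: explicitly-NULL members such as .late_init = NULL, .dump_ip_state = NULL and .print_ip_state = NULL are deleted outright. This is purely cosmetic, because C designated initializers zero-initialize all unnamed members and, as the restore paths above already show, the core checks each optional callback for NULL before invoking it. A minimal illustration with a hypothetical ops table:

    struct example_funcs {
            int (*early_init)(struct amdgpu_ip_block *ip_block);
            int (*late_init)(struct amdgpu_ip_block *ip_block);
    };

    static int example_early_init(struct amdgpu_ip_block *ip_block)
    {
            return 0;
    }

    static const struct example_funcs example_table = {
            .early_init = example_early_init,
            /* .late_init is implicitly NULL; spelling it out adds nothing */
    };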
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v4_2_hw_fini(void *handle) +static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -218,17 +218,17 @@ static int uvd_v4_2_hw_fini(void *handle) return 0; } -static int uvd_v4_2_prepare_suspend(void *handle) +static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v4_2_suspend(void *handle) +static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -254,23 +254,22 @@ static int uvd_v4_2_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v4_2_hw_fini(adev); + r = uvd_v4_2_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v4_2_resume(void *handle) +static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v4_2_hw_init(adev); + return uvd_v4_2_hw_init(ip_block); } /** @@ -666,10 +665,10 @@ static bool uvd_v4_2_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v4_2_wait_for_idle(void *handle) +static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -678,9 +677,9 @@ static int uvd_v4_2_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int uvd_v4_2_soft_reset(void *handle) +static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v4_2_stop(adev); @@ -756,7 +755,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .name = "uvd_v4_2", .early_init = uvd_v4_2_early_init, - .late_init = NULL, .sw_init = uvd_v4_2_sw_init, .sw_fini = uvd_v4_2_sw_fini, .hw_init = uvd_v4_2_hw_init, @@ -769,8 +767,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .soft_reset = uvd_v4_2_soft_reset, .set_clockgating_state = uvd_v4_2_set_clockgating_state, .set_powergating_state = uvd_v4_2_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index efd903c21d48..ab55fae3569e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -88,9 +88,9 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } -static int uvd_v5_0_early_init(void *handle) +static int uvd_v5_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; 
uvd_v5_0_set_ring_funcs(adev); @@ -99,10 +99,10 @@ static int uvd_v5_0_early_init(void *handle) return 0; } -static int uvd_v5_0_sw_init(void *handle) +static int uvd_v5_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* UVD TRAP */ @@ -128,10 +128,10 @@ static int uvd_v5_0_sw_init(void *handle) return r; } -static int uvd_v5_0_sw_fini(void *handle) +static int uvd_v5_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -143,13 +143,13 @@ static int uvd_v5_0_sw_fini(void *handle) /** * uvd_v5_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v5_0_hw_init(void *handle) +static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -200,13 +200,13 @@ done: /** * uvd_v5_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v5_0_hw_fini(void *handle) +static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -216,17 +216,17 @@ static int uvd_v5_0_hw_fini(void *handle) return 0; } -static int uvd_v5_0_prepare_suspend(void *handle) +static int uvd_v5_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v5_0_suspend(void *handle) +static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -252,23 +252,22 @@ static int uvd_v5_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v5_0_hw_fini(adev); + r = uvd_v5_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v5_0_resume(void *handle) +static int uvd_v5_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v5_0_hw_init(adev); + return uvd_v5_0_hw_init(ip_block); } /** @@ -588,10 +587,10 @@ static bool uvd_v5_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v5_0_wait_for_idle(void *handle) +static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -600,9 +599,9 @@ static int uvd_v5_0_wait_for_idle(void *handle) 
return -ETIMEDOUT; } -static int uvd_v5_0_soft_reset(void *handle) +static int uvd_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; uvd_v5_0_stop(adev); @@ -796,10 +795,15 @@ static int uvd_v5_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; if (enable) { /* wait for STATUS to clear */ - if (uvd_v5_0_wait_for_idle(handle)) + if (uvd_v5_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v5_0_enable_clock_gating(adev, true); @@ -863,7 +867,6 @@ out: static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .name = "uvd_v5_0", .early_init = uvd_v5_0_early_init, - .late_init = NULL, .sw_init = uvd_v5_0_sw_init, .sw_fini = uvd_v5_0_sw_fini, .hw_init = uvd_v5_0_hw_init, @@ -877,8 +880,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, .get_clockgating_state = uvd_v5_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 495de5068455..39f8c3d3a135 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -354,9 +354,9 @@ error: return r; } -static int uvd_v6_0_early_init(void *handle) +static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && @@ -375,11 +375,11 @@ static int uvd_v6_0_early_init(void *handle) return 0; } -static int uvd_v6_0_sw_init(void *handle) +static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* UVD TRAP */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); @@ -435,10 +435,10 @@ static int uvd_v6_0_sw_init(void *handle) return r; } -static int uvd_v6_0_sw_fini(void *handle) +static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_uvd_suspend(adev); if (r) @@ -455,13 +455,13 @@ static int uvd_v6_0_sw_fini(void *handle) /** * uvd_v6_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v6_0_hw_init(void *handle) +static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -524,13 +524,13 @@ done: /** * uvd_v6_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
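Not every callback changed signature in this pass: set_clockgating_state() and set_powergating_state() still take a void *handle. When such a callback needs one of the converted functions, as in the uvd_v5_0 hunk above and the uvd_v6_0 hunk below, the series looks the block up with amdgpu_device_ip_get_ip_block() and bails out with -EINVAL if it is missing. The shape, reusing the hypothetical example_wait_for_idle() sketched earlier:

    static int example_wait_for_idle(struct amdgpu_ip_block *ip_block);

    static int example_set_clockgating_state(void *handle,
                                             enum amd_clockgating_state state)
    {
            struct amdgpu_device *adev = handle;
            struct amdgpu_ip_block *ip_block;

            ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD);
            if (!ip_block)
                    return -EINVAL;

            /* wait for STATUS to clear before gating the clocks */
            if (state == AMD_CG_STATE_GATE && example_wait_for_idle(ip_block))
                    return -EBUSY;

            return 0;
    }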
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v6_0_hw_fini(void *handle) +static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -540,17 +540,17 @@ static int uvd_v6_0_hw_fini(void *handle) return 0; } -static int uvd_v6_0_prepare_suspend(void *handle) +static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v6_0_suspend(void *handle) +static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -576,23 +576,22 @@ static int uvd_v6_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v6_0_hw_fini(adev); + r = uvd_v6_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v6_0_resume(void *handle) +static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v6_0_hw_init(adev); + return uvd_v6_0_hw_init(ip_block); } /** @@ -1151,22 +1150,22 @@ static bool uvd_v6_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v6_0_wait_for_idle(void *handle) +static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v6_0_is_idle(handle)) + if (uvd_v6_0_is_idle(adev)) return 0; } return -ETIMEDOUT; } #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v6_0_check_soft_reset(void *handle) +static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1184,9 +1183,9 @@ static bool uvd_v6_0_check_soft_reset(void *handle) } } -static int uvd_v6_0_pre_soft_reset(void *handle) +static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1195,9 +1194,9 @@ static int uvd_v6_0_pre_soft_reset(void *handle) return 0; } -static int uvd_v6_0_soft_reset(void *handle) +static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->uvd.inst->srbm_soft_reset) @@ -1226,9 +1225,9 @@ static int uvd_v6_0_soft_reset(void *handle) return 0; } -static int uvd_v6_0_post_soft_reset(void *handle) +static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->uvd.inst->srbm_soft_reset) return 0; @@ -1455,11 +1454,16 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ip_block *ip_block; bool enable = (state == AMD_CG_STATE_GATE); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD); + if (!ip_block) + return -EINVAL; + if (enable) { /* wait for STATUS to clear */ - if (uvd_v6_0_wait_for_idle(handle)) + if (uvd_v6_0_wait_for_idle(ip_block)) return -EBUSY; uvd_v6_0_enable_clock_gating(adev, true); /* enable HW gates because UVD is idle */ @@ -1528,7 +1532,6 @@ out: static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .name = "uvd_v6_0", .early_init = uvd_v6_0_early_init, - .late_init = NULL, .sw_init = uvd_v6_0_sw_init, .sw_fini = uvd_v6_0_sw_fini, .hw_init = uvd_v6_0_hw_init, @@ -1545,8 +1548,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, .get_clockgating_state = uvd_v6_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 6068b784dc69..079131aeb2f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -361,9 +361,9 @@ error: return r; } -static int uvd_v7_0_early_init(void *handle) +static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->asic_type == CHIP_VEGA20) { u32 harvest; @@ -395,12 +395,12 @@ static int uvd_v7_0_early_init(void *handle) return 0; } -static int uvd_v7_0_sw_init(void *handle) +static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) @@ -487,10 +487,10 @@ static int uvd_v7_0_sw_init(void *handle) return r; } -static int uvd_v7_0_sw_fini(void *handle) +static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) { int i, j, r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_virt_free_mm_table(adev); @@ -510,13 +510,13 @@ static int uvd_v7_0_sw_fini(void *handle) /** * uvd_v7_0_hw_init - start and test UVD block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v7_0_hw_init(void *handle) +static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; uint32_t tmp; int i, j, r; @@ -588,13 +588,13 @@ done: /** * uvd_v7_0_hw_fini - stop the hardware block * - * @handle: handle used to pass amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
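The suspend/resume wrappers across the uvd hunks above reduce to pure forwarding: suspend() halts the engine via hw_fini(ip_block) and then saves firmware state, while resume() restores state and re-runs hw_init(ip_block). Because hw_init/hw_fini now take the ip_block rather than a void pointer, each wrapper passes its own argument straight through instead of unwrapping adev first. A sketch of the resulting shape (example_* names are illustrative; amdgpu_uvd_suspend/resume are the real helpers):

    static int example_hw_init(struct amdgpu_ip_block *ip_block);
    static int example_hw_fini(struct amdgpu_ip_block *ip_block);

    static int example_suspend(struct amdgpu_ip_block *ip_block)
    {
            int r;

            r = example_hw_fini(ip_block);  /* halt the engine first */
            if (r)
                    return r;

            return amdgpu_uvd_suspend(ip_block->adev);      /* then save state */
    }

    static int example_resume(struct amdgpu_ip_block *ip_block)
    {
            int r;

            r = amdgpu_uvd_resume(ip_block->adev);  /* restore state first */
            if (r)
                    return r;

            return example_hw_init(ip_block);       /* then restart the engine */
    }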
* * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v7_0_hw_fini(void *handle) +static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -608,17 +608,17 @@ static int uvd_v7_0_hw_fini(void *handle) return 0; } -static int uvd_v7_0_prepare_suspend(void *handle) +static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; return amdgpu_uvd_prepare_suspend(adev); } -static int uvd_v7_0_suspend(void *handle) +static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -644,23 +644,22 @@ static int uvd_v7_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = uvd_v7_0_hw_fini(adev); + r = uvd_v7_0_hw_fini(ip_block); if (r) return r; return amdgpu_uvd_suspend(adev); } -static int uvd_v7_0_resume(void *handle) +static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); + r = amdgpu_uvd_resume(ip_block->adev); if (r) return r; - return uvd_v7_0_hw_init(adev); + return uvd_v7_0_hw_init(ip_block); } /** @@ -1463,104 +1462,6 @@ static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -#if 0 -static bool uvd_v7_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); -} - -static int uvd_v7_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) { - if (uvd_v7_0_is_idle(handle)) - return 0; - } - return -ETIMEDOUT; -} - -#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd -static bool uvd_v7_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - u32 tmp = RREG32(mmSRBM_STATUS); - - if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || - REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & - AMDGPU_UVD_STATUS_BUSY_MASK)) - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, - SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); - - if (srbm_soft_reset) { - adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->uvd.inst[ring->me].srbm_soft_reset = 0; - return false; - } -} - -static int uvd_v7_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - uvd_v7_0_stop(adev); - return 0; -} - -static int uvd_v7_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = 
RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int uvd_v7_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->uvd.inst[ring->me].srbm_soft_reset) - return 0; - - mdelay(5); - - return uvd_v7_0_start(adev); -} -#endif - static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -1610,171 +1511,6 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, return 0; } -#if 0 -static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, data2, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); - - data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | - UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | - (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | - (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); - - data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | - UVD_CGC_CTRL__UDEC_CM_MODE_MASK | - UVD_CGC_CTRL__UDEC_IT_MODE_MASK | - UVD_CGC_CTRL__UDEC_DB_MODE_MASK | - UVD_CGC_CTRL__UDEC_MP_MODE_MASK | - UVD_CGC_CTRL__SYS_MODE_MASK | - UVD_CGC_CTRL__UDEC_MODE_MASK | - UVD_CGC_CTRL__MPEG2_MODE_MASK | - UVD_CGC_CTRL__REGS_MODE_MASK | - UVD_CGC_CTRL__RBC_MODE_MASK | - UVD_CGC_CTRL__LMI_MC_MODE_MASK | - UVD_CGC_CTRL__LMI_UMC_MODE_MASK | - UVD_CGC_CTRL__IDCT_MODE_MASK | - UVD_CGC_CTRL__MPRD_MODE_MASK | - UVD_CGC_CTRL__MPC_MODE_MASK | - UVD_CGC_CTRL__LBSI_MODE_MASK | - UVD_CGC_CTRL__LRBBM_MODE_MASK | - UVD_CGC_CTRL__WCB_MODE_MASK | - UVD_CGC_CTRL__VCPU_MODE_MASK | - UVD_CGC_CTRL__JPEG_MODE_MASK | - UVD_CGC_CTRL__JPEG2_MODE_MASK | - UVD_CGC_CTRL__SCPU_MODE_MASK); - data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | - UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | - UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | - UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | - UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); - data1 |= suvd_flags; - - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); -} - -static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) -{ - uint32_t data, data1, cgc_flags, suvd_flags; - - data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); - - cgc_flags = UVD_CGC_GATE__SYS_MASK | - UVD_CGC_GATE__UDEC_MASK | - UVD_CGC_GATE__MPEG2_MASK | - UVD_CGC_GATE__RBC_MASK | - UVD_CGC_GATE__LMI_MC_MASK | - UVD_CGC_GATE__IDCT_MASK | - UVD_CGC_GATE__MPRD_MASK | - UVD_CGC_GATE__MPC_MASK | - UVD_CGC_GATE__LBSI_MASK | - UVD_CGC_GATE__LRBBM_MASK | - UVD_CGC_GATE__UDEC_RE_MASK | - UVD_CGC_GATE__UDEC_CM_MASK | - UVD_CGC_GATE__UDEC_IT_MASK | - UVD_CGC_GATE__UDEC_DB_MASK | - UVD_CGC_GATE__UDEC_MP_MASK | - UVD_CGC_GATE__WCB_MASK | - UVD_CGC_GATE__VCPU_MASK | - UVD_CGC_GATE__SCPU_MASK | - UVD_CGC_GATE__JPEG_MASK | - UVD_CGC_GATE__JPEG2_MASK; - - suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | - UVD_SUVD_CGC_GATE__SIT_MASK | - UVD_SUVD_CGC_GATE__SMP_MASK | - UVD_SUVD_CGC_GATE__SCM_MASK | - UVD_SUVD_CGC_GATE__SDB_MASK; - - data |= cgc_flags; - data1 |= suvd_flags; - - 
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); -} - -static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - else - tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | - GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - - -static int uvd_v7_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - - uvd_v7_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (enable) { - /* disable HW gating and enable Sw gating */ - uvd_v7_0_set_sw_clock_gating(adev); - } else { - /* wait for STATUS to clear */ - if (uvd_v7_0_wait_for_idle(handle)) - return -EBUSY; - - /* enable HW gates because UVD is idle */ - /* uvd_v7_0_set_hw_clock_gating(adev); */ - } - - return 0; -} - -static int uvd_v7_0_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - /* This doesn't actually powergate the UVD block. - * That's done in the dpm code via the SMC. This - * just re-inits the block as necessary. The actual - * gating still happens in the dpm code. We should - * revisit this when there is a cleaner line between - * the smc and the hw blocks - */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - - WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); - - if (state == AMD_PG_STATE_GATE) { - uvd_v7_0_stop(adev); - return 0; - } else { - return uvd_v7_0_start(adev); - } -} -#endif - static int uvd_v7_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1785,7 +1521,6 @@ static int uvd_v7_0_set_clockgating_state(void *handle, const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .name = "uvd_v7_0", .early_init = uvd_v7_0_early_init, - .late_init = NULL, .sw_init = uvd_v7_0_sw_init, .sw_fini = uvd_v7_0_sw_fini, .hw_init = uvd_v7_0_hw_init, @@ -1793,12 +1528,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { .prepare_suspend = uvd_v7_0_prepare_suspend, .suspend = uvd_v7_0_suspend, .resume = uvd_v7_0_resume, - .is_idle = NULL /* uvd_v7_0_is_idle */, - .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */, - .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */, - .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */, - .soft_reset = NULL /* uvd_v7_0_soft_reset */, - .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */, .set_clockgating_state = uvd_v7_0_set_clockgating_state, .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 66fada199bda..c1ed91b39415 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); } -static int vce_v2_0_wait_for_idle(void *handle) +static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (vce_v2_0_is_idle(handle)) + if 
(vce_v2_0_is_idle(adev)) return 0; } return -ETIMEDOUT; @@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev) static int vce_v2_0_stop(struct amdgpu_device *adev) { + struct amdgpu_ip_block *ip_block; int i; int status; + if (vce_v2_0_lmi_clean(adev)) { DRM_INFO("vce is not idle \n"); return 0; } - if (vce_v2_0_wait_for_idle(adev)) { + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN); + if (!ip_block) + return -EINVAL; + + if (vce_v2_0_wait_for_idle(ip_block)) { DRM_INFO("VCE is busy, Can't set clock gating"); return 0; } @@ -398,9 +404,9 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, } } -static int vce_v2_0_early_init(void *handle) +static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.num_rings = 2; @@ -410,11 +416,11 @@ static int vce_v2_0_early_init(void *handle) return 0; } -static int vce_v2_0_sw_init(void *handle) +static int vce_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCE */ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); @@ -444,10 +450,10 @@ static int vce_v2_0_sw_init(void *handle) return r; } -static int vce_v2_0_sw_fini(void *handle) +static int vce_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -456,10 +462,10 @@ static int vce_v2_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v2_0_hw_init(void *handle) +static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_asic_set_vce_clocks(adev, 10000, 10000); vce_v2_0_enable_mgcg(adev, true, false); @@ -475,19 +481,17 @@ static int vce_v2_0_hw_init(void *handle) return 0; } -static int vce_v2_0_hw_fini(void *handle) +static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cancel_delayed_work_sync(&adev->vce.idle_work); + cancel_delayed_work_sync(&ip_block->adev->vce.idle_work); return 0; } -static int vce_v2_0_suspend(void *handle) +static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* @@ -513,28 +517,27 @@ static int vce_v2_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v2_0_hw_fini(adev); + r = vce_v2_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v2_0_resume(void *handle) +static int vce_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v2_0_hw_init(adev); + return vce_v2_0_hw_init(ip_block); } -static int vce_v2_0_soft_reset(void *handle) +static int vce_v2_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); @@ -614,7 +617,6 @@ static int 
vce_v2_0_set_powergating_state(void *handle, static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .name = "vce_v2_0", .early_init = vce_v2_0_early_init, - .late_init = NULL, .sw_init = vce_v2_0_sw_init, .sw_fini = vce_v2_0_sw_fini, .hw_init = vce_v2_0_hw_init, @@ -626,8 +628,6 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .soft_reset = vce_v2_0_soft_reset, .set_clockgating_state = vce_v2_0_set_clockgating_state, .set_powergating_state = vce_v2_0_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 4bfba2931b08..6bb318a06f19 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -64,7 +64,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); -static int vce_v3_0_wait_for_idle(void *handle); +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block); static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state); /** @@ -396,9 +396,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) } } -static int vce_v3_0_early_init(void *handle) +static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); @@ -415,9 +415,9 @@ static int vce_v3_0_early_init(void *handle) return 0; } -static int vce_v3_0_sw_init(void *handle) +static int vce_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int r, i; @@ -453,10 +453,10 @@ static int vce_v3_0_sw_init(void *handle) return r; } -static int vce_v3_0_sw_fini(void *handle) +static int vce_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vce_suspend(adev); if (r) @@ -465,10 +465,10 @@ static int vce_v3_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v3_0_hw_init(void *handle) +static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vce_v3_0_override_vce_clock_gating(adev, true); @@ -485,14 +485,14 @@ static int vce_v3_0_hw_init(void *handle) return 0; } -static int vce_v3_0_hw_fini(void *handle) +static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); - r = vce_v3_0_wait_for_idle(handle); + r = vce_v3_0_wait_for_idle(ip_block); if (r) return r; @@ -500,10 +500,10 @@ static int vce_v3_0_hw_fini(void *handle) return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); } -static int vce_v3_0_suspend(void *handle) +static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* * Proper cleanups before halting the HW engine: @@ -528,23 +528,22 @@ 
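/*
 * Context for the vce_v2_0_stop() hunk above: wait_for_idle() now
 * takes an ip_block, so the caller first has to look one up. A
 * plausible shape for that lookup, assuming the real
 * amdgpu_device_ip_get_ip_block() walks adev->ip_blocks (details may
 * differ):
 */
static struct amdgpu_ip_block *
demo_ip_get_ip_block(struct amdgpu_device *adev, enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}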
static int vce_v3_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v3_0_hw_fini(adev); + r = vce_v3_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v3_0_resume(void *handle) +static int vce_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vce_resume(adev); + r = amdgpu_vce_resume(ip_block->adev); if (r) return r; - return vce_v3_0_hw_init(adev); + return vce_v3_0_hw_init(ip_block); } static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) @@ -609,13 +608,13 @@ static bool vce_v3_0_is_idle(void *handle) return !(RREG32(mmSRBM_STATUS2) & mask); } -static int vce_v3_0_wait_for_idle(void *handle) +static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) - if (vce_v3_0_is_idle(handle)) + if (vce_v3_0_is_idle(adev)) return 0; return -ETIMEDOUT; @@ -627,9 +626,9 @@ static int vce_v3_0_wait_for_idle(void *handle) #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) -static bool vce_v3_0_check_soft_reset(void *handle) +static bool vce_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset = 0; /* According to VCE team , we should use VCE_STATUS instead @@ -668,9 +667,9 @@ static bool vce_v3_0_check_soft_reset(void *handle) } } -static int vce_v3_0_soft_reset(void *handle) +static int vce_v3_0_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; u32 srbm_soft_reset; if (!adev->vce.srbm_soft_reset) @@ -699,29 +698,29 @@ static int vce_v3_0_soft_reset(void *handle) return 0; } -static int vce_v3_0_pre_soft_reset(void *handle) +static int vce_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_suspend(adev); + return vce_v3_0_suspend(ip_block); } -static int vce_v3_0_post_soft_reset(void *handle) +static int vce_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->vce.srbm_soft_reset) return 0; mdelay(5); - return vce_v3_0_resume(adev); + return vce_v3_0_resume(ip_block); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -897,7 +896,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .name = "vce_v3_0", .early_init = vce_v3_0_early_init, - .late_init = NULL, .sw_init = vce_v3_0_sw_init, .sw_fini = vce_v3_0_sw_fini, .hw_init = vce_v3_0_hw_init, @@ -913,8 +911,6 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, .get_clockgating_state = vce_v3_0_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
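/*
 * The conversion repeated across vce_v2_0.c/vce_v3_0.c above and
 * vce_v4_0.c below, reduced to its minimal form (vce_vX_* is an
 * illustrative name, mirroring the real vce_v2_0_hw_fini() hunk):
 *
 * before:
 *	static int vce_vX_hw_fini(void *handle)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 *		...
 *	}
 *
 * after:
 */
static int vce_vX_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	return 0;
}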
index 0748bf44c880..79ee555768a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -407,9 +407,9 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) return 0; } -static int vce_v4_0_early_init(void *handle) +static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */ adev->vce.num_rings = 1; @@ -422,9 +422,9 @@ static int vce_v4_0_early_init(void *handle) return 0; } -static int vce_v4_0_sw_init(void *handle) +static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; unsigned size; @@ -493,10 +493,10 @@ static int vce_v4_0_sw_init(void *handle) return r; } -static int vce_v4_0_sw_fini(void *handle) +static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* free MM table */ amdgpu_virt_free_mm_table(adev); @@ -513,10 +513,10 @@ static int vce_v4_0_sw_fini(void *handle) return amdgpu_vce_sw_fini(adev); } -static int vce_v4_0_hw_init(void *handle) +static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) r = vce_v4_0_sriov_start(adev); @@ -536,14 +536,14 @@ static int vce_v4_0_hw_init(void *handle) return 0; } -static int vce_v4_0_hw_fini(void *handle) +static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vce.idle_work); if (!amdgpu_sriov_vf(adev)) { - /* vce_v4_0_wait_for_idle(handle); */ + /* vce_v4_0_wait_for_idle(ip_block); */ vce_v4_0_stop(adev); } else { /* full access mode, so don't touch any VCE register */ @@ -553,9 +553,9 @@ static int vce_v4_0_hw_fini(void *handle) return 0; } -static int vce_v4_0_suspend(void *handle) +static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -594,16 +594,16 @@ static int vce_v4_0_suspend(void *handle) AMD_CG_STATE_GATE); } - r = vce_v4_0_hw_fini(adev); + r = vce_v4_0_hw_fini(ip_block); if (r) return r; return amdgpu_vce_suspend(adev); } -static int vce_v4_0_resume(void *handle) +static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r, idx; if (adev->vce.vcpu_bo == NULL) @@ -624,7 +624,7 @@ static int vce_v4_0_resume(void *handle) return r; } - return vce_v4_0_hw_init(adev); + return vce_v4_0_hw_init(ip_block); } static void vce_v4_0_mc_resume(struct amdgpu_device *adev) @@ -691,273 +691,6 @@ static int vce_v4_0_set_clockgating_state(void *handle, return 0; } -#if 0 -static bool vce_v4_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; - - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_STATUS2__VCE1_BUSY_MASK; - - return !(RREG32(mmSRBM_STATUS2) & mask); -} - -static int vce_v4_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) - if (vce_v4_0_is_idle(handle)) - return 0; - - return -ETIMEDOUT; -} - -#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ -#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ -#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ - VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) - -static bool vce_v4_0_check_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset = 0; - - /* According to VCE team , we should use VCE_STATUS instead - * SRBM_STATUS.VCE_BUSY bit for busy status checking. - * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE - * instance's registers are accessed - * (0 for 1st instance, 10 for 2nd instance). - * - *VCE_STATUS - *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | - *|----+----+-----------+----+----+----+----------+---------+----| - *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| - * - * VCE team suggest use bit 3--bit 6 for busy status check - */ - mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); - if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); - srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - if (srbm_soft_reset) { - adev->vce.srbm_soft_reset = srbm_soft_reset; - return true; - } else { - adev->vce.srbm_soft_reset = 0; - return false; - } -} - -static int vce_v4_0_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 srbm_soft_reset; - - if (!adev->vce.srbm_soft_reset) - return 0; - srbm_soft_reset = adev->vce.srbm_soft_reset; - - if (srbm_soft_reset) { - u32 tmp; - - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - - return 0; -} - -static int vce_v4_0_pre_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_suspend(adev); -} - - -static int vce_v4_0_post_soft_reset(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->vce.srbm_soft_reset) - return 0; - - mdelay(5); - - return vce_v4_0_resume(adev); -} - -static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) -{ - u32 tmp, data; - - tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, 
mmVCE_RB_ARB_CTRL)); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data); -} - -static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, - bool gated) -{ - u32 data; - - /* Set Override to disable Clock Gating */ - vce_v4_0_override_vce_clock_gating(adev, true); - - /* This function enables MGCG which is controlled by firmware. - With the clocks in the gated state the core is still - accessible but the firmware will throttle the clocks on the - fly as necessary. - */ - if (gated) { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data |= 0x1ff; - data &= ~0xef0000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0x3ff000; - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x2; - data &= ~0x00010000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data |= 0x37f; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } else { - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B)); - data &= ~0x80010; - data |= 0xe70008; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING)); - data |= 0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2)); - data |= 0x10000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING)); - data &= ~0xffc00000; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data); - - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL)); - data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data); - } - vce_v4_0_override_vce_clock_gating(adev, false); -} - -static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - else - tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - -static int vce_v4_0_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE); - int i; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_TONGA) || - (adev->asic_type == CHIP_FIJI)) - vce_v4_0_set_bypass_mode(adev, enable); - - if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) - return 0; - - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < 2; i++) { - /* Program VCE Instance 0 or 1 if not harvested */ - if 
(adev->vce.harvest_config & (1 << i)) - continue; - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); - - if (enable) { - /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ - uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data); - - /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ - data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING); - data &= ~(0xf | 0xff0); - data |= ((0x0 << 0) | (0x04 << 4)); - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data); - } - - vce_v4_0_set_vce_sw_clock_gating(adev, enable); - } - - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} -#endif - static int vce_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state) { @@ -1076,19 +809,12 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev, const struct amd_ip_funcs vce_v4_0_ip_funcs = { .name = "vce_v4_0", .early_init = vce_v4_0_early_init, - .late_init = NULL, .sw_init = vce_v4_0_sw_init, .sw_fini = vce_v4_0_sw_fini, .hw_init = vce_v4_0_hw_init, .hw_fini = vce_v4_0_hw_fini, .suspend = vce_v4_0_suspend, .resume = vce_v4_0_resume, - .is_idle = NULL /* vce_v4_0_is_idle */, - .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */, - .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */, - .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */, - .soft_reset = NULL /* vce_v4_0_soft_reset */, - .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, .set_clockgating_state = vce_v4_0_set_clockgating_state, .set_powergating_state = vce_v4_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index ecdfbfefd66a..10e99c926fb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -95,14 +95,14 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); /** * vcn_v1_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v1_0_early_init(void *handle) +static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->vcn.num_enc_rings = 2; @@ -110,7 +110,7 @@ static int vcn_v1_0_early_init(void *handle) vcn_v1_0_set_enc_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); - jpeg_v1_0_early_init(handle); + jpeg_v1_0_early_init(ip_block); return amdgpu_vcn_early_init(adev); } @@ -118,17 +118,17 @@ static int vcn_v1_0_early_init(void *handle) /** * vcn_v1_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
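 *
 * Note: vcn_v1_0 forwards the same ip_block straight into the bundled
 * JPEG 1.0 helpers (jpeg_v1_0_early_init() above, jpeg_v1_0_sw_init()
 * and jpeg_v1_0_sw_fini() below), so those prototypes change in
 * lockstep with the VCN ones.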
* * Load firmware and sw initialization */ -static int vcn_v1_0_sw_init(void *handle) +static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -197,7 +197,7 @@ static int vcn_v1_0_sw_init(void *handle) amdgpu_vcn_fwlog_init(adev->vcn.inst); } - r = jpeg_v1_0_sw_init(handle); + r = jpeg_v1_0_sw_init(ip_block); /* Allocate memory for VCN IP Dump buffer */ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); @@ -213,20 +213,20 @@ static int vcn_v1_0_sw_init(void *handle) /** * vcn_v1_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v1_0_sw_fini(void *handle) +static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_suspend(adev); if (r) return r; - jpeg_v1_0_sw_fini(handle); + jpeg_v1_0_sw_fini(ip_block); r = amdgpu_vcn_sw_fini(adev); @@ -238,13 +238,13 @@ static int vcn_v1_0_sw_fini(void *handle) /** * vcn_v1_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v1_0_hw_init(void *handle) +static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -268,13 +268,13 @@ static int vcn_v1_0_hw_init(void *handle) /** * vcn_v1_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v1_0_hw_fini(void *handle) +static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -290,14 +290,14 @@ static int vcn_v1_0_hw_fini(void *handle) /** * vcn_v1_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v1_0_suspend(void *handle) +static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool idle_work_unexecuted; idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -306,7 +306,7 @@ static int vcn_v1_0_suspend(void *handle) amdgpu_dpm_enable_uvd(adev, false); } - r = vcn_v1_0_hw_fini(adev); + r = vcn_v1_0_hw_fini(ip_block); if (r) return r; @@ -318,20 +318,19 @@ static int vcn_v1_0_suspend(void *handle) /** * vcn_v1_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
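 *
 * (On the suspend path above: cancel_delayed_work_sync() returns true
 * when the idle worker was still pending, meaning it never ran and
 * never powered the block down; that is why the idle_work_unexecuted
 * branch disables the UVD clocks itself via
 * amdgpu_dpm_enable_uvd(adev, false).)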
* * Resume firmware and hw init VCN block */ -static int vcn_v1_0_resume(void *handle) +static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v1_0_hw_init(adev); + r = vcn_v1_0_hw_init(ip_block); return r; } @@ -1384,9 +1383,9 @@ static bool vcn_v1_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v1_0_wait_for_idle(void *handle) +static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -1925,9 +1924,9 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring) mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround); } -static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0); uint32_t inst_off, is_powered; @@ -1957,9 +1956,9 @@ static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v1_0_dump_ip_state(void *handle) +static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1988,7 +1987,6 @@ static void vcn_v1_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, - .late_init = NULL, .sw_init = vcn_v1_0_sw_init, .sw_fini = vcn_v1_0_sw_fini, .hw_init = vcn_v1_0_hw_init, @@ -1997,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .resume = vcn_v1_0_resume, .is_idle = vcn_v1_0_is_idle, .wait_for_idle = vcn_v1_0_wait_for_idle, - .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */, - .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */, - .soft_reset = NULL /* vcn_v1_0_soft_reset */, - .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, .set_powergating_state = vcn_v1_0_set_powergating_state, .dump_ip_state = vcn_v1_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index bfd067e2d2f1..e0322cbca3ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -100,14 +100,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
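 *
 * The substitution is uniform across the callbacks below: the
 * "(struct amdgpu_device *)handle" cast becomes
 *
 *	struct amdgpu_device *adev = ip_block->adev;
 *
 * and internal callers (suspend -> hw_fini, resume -> hw_init) pass
 * the ip_block through instead of adev.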
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_0_early_init(void *handle) +static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) adev->vcn.num_enc_rings = 1; @@ -124,17 +124,17 @@ static int vcn_v2_0_early_init(void *handle) /** * vcn_v2_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v2_0_sw_init(void *handle) +static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ @@ -237,14 +237,14 @@ static int vcn_v2_0_sw_init(void *handle) /** * vcn_v2_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_0_sw_fini(void *handle) +static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -268,13 +268,13 @@ static int vcn_v2_0_sw_fini(void *handle) /** * vcn_v2_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_0_hw_init(void *handle) +static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; @@ -305,13 +305,13 @@ static int vcn_v2_0_hw_init(void *handle) /** * vcn_v2_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_0_hw_fini(void *handle) +static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -326,20 +326,19 @@ static int vcn_v2_0_hw_fini(void *handle) /** * vcn_v2_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_0_suspend(void *handle) +static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_0_hw_fini(adev); + r = vcn_v2_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -347,20 +346,19 @@ static int vcn_v2_0_suspend(void *handle) /** * vcn_v2_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v2_0_resume(void *handle) +static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_0_hw_init(adev); + r = vcn_v2_0_hw_init(ip_block); return r; } @@ -1326,9 +1324,9 @@ static bool vcn_v2_0_is_idle(void *handle) return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } -static int vcn_v2_0_wait_for_idle(void *handle) +static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int ret; ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, @@ -2034,9 +2032,9 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); } -static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0); uint32_t inst_off, is_powered; @@ -2066,9 +2064,9 @@ static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_0_dump_ip_state(void *handle) +static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2097,7 +2095,6 @@ static void vcn_v2_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, - .late_init = NULL, .sw_init = vcn_v2_0_sw_init, .sw_fini = vcn_v2_0_sw_fini, .hw_init = vcn_v2_0_hw_init, @@ -2106,10 +2103,6 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .resume = vcn_v2_0_resume, .is_idle = vcn_v2_0_is_idle, .wait_for_idle = vcn_v2_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_v2_0_set_powergating_state, .dump_ip_state = vcn_v2_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 04e9e806e318..6aa08281d094 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -110,14 +110,14 @@ static int amdgpu_ih_clientid_vcns[] = { /** * vcn_v2_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v2_5_early_init(void *handle) +static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = 2; @@ -151,17 +151,17 @@ static int vcn_v2_5_early_init(void *handle) /** * vcn_v2_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
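 *
 * Aside on the amd_ip_funcs tables in these hunks: entries such as
 * ".late_init = NULL" or ".check_soft_reset = NULL" can simply be
 * dropped, because designated initializers zero every member they do
 * not name. Illustration (demo_* names are made up):
 *
 *	static const struct amd_ip_funcs demo_funcs = {
 *		.name = "demo",
 *		.early_init = demo_early_init,
 *		// .late_init, .soft_reset, ... are implicitly NULL
 *	};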
* * Load firmware and sw initialization */ -static int vcn_v2_5_sw_init(void *handle) +static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { if (adev->vcn.harvest_config & (1 << j)) @@ -295,14 +295,14 @@ static int vcn_v2_5_sw_init(void *handle) /** * vcn_v2_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v2_5_sw_fini(void *handle) +static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; volatile struct amdgpu_fw_shared *fw_shared; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -333,13 +333,13 @@ static int vcn_v2_5_sw_fini(void *handle) /** * vcn_v2_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v2_5_hw_init(void *handle) +static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r = 0; @@ -381,13 +381,13 @@ static int vcn_v2_5_hw_init(void *handle) /** * vcn_v2_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v2_5_hw_fini(void *handle) +static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -411,20 +411,19 @@ static int vcn_v2_5_hw_fini(void *handle) /** * vcn_v2_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v2_5_suspend(void *handle) +static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v2_5_hw_fini(adev); + r = vcn_v2_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -432,20 +431,19 @@ static int vcn_v2_5_suspend(void *handle) /** * vcn_v2_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
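 *
 * As with the other generations, suspend and resume stay symmetric
 * after the conversion (vcn_vX_* stands for the per-generation
 * prefix):
 *
 *	suspend: vcn_vX_hw_fini(ip_block);  amdgpu_vcn_suspend(adev);
 *	resume:  amdgpu_vcn_resume(adev);   vcn_vX_hw_init(ip_block);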
* * Resume firmware and hw init VCN block */ -static int vcn_v2_5_resume(void *handle) +static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v2_5_hw_init(adev); + r = vcn_v2_5_hw_init(ip_block); return r; } @@ -1786,9 +1784,9 @@ static bool vcn_v2_5_is_idle(void *handle) return ret; } -static int vcn_v2_5_wait_for_idle(void *handle) +static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1926,9 +1924,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5); uint32_t inst_off, is_powered; @@ -1958,9 +1956,9 @@ static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v2_5_dump_ip_state(void *handle) +static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1989,7 +1987,6 @@ static void vcn_v2_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .name = "vcn_v2_5", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -1998,10 +1995,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, @@ -2011,7 +2004,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .name = "vcn_v2_6", .early_init = vcn_v2_5_early_init, - .late_init = NULL, .sw_init = vcn_v2_5_sw_init, .sw_fini = vcn_v2_5_sw_fini, .hw_init = vcn_v2_5_hw_init, @@ -2020,10 +2012,6 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .resume = vcn_v2_5_resume, .is_idle = vcn_v2_5_is_idle, .wait_for_idle = vcn_v2_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, .dump_ip_state = vcn_v2_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 65dd68b32280..6732ad7f16f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -116,14 +116,14 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v3_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v3_0_early_init(void *handle) +static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; @@ -153,18 +153,18 @@ static int vcn_v3_0_early_init(void *handle) /** * vcn_v3_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v3_0_sw_init(void *handle) +static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; int i, j, r; int vcn_doorbell_index = 0; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t *ptr; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = amdgpu_vcn_sw_init(adev); if (r) @@ -299,13 +299,13 @@ static int vcn_v3_0_sw_init(void *handle) /** * vcn_v3_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v3_0_sw_fini(void *handle) +static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -338,13 +338,13 @@ static int vcn_v3_0_sw_fini(void *handle) /** * vcn_v3_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v3_0_hw_init(void *handle) +static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, j, r; @@ -413,13 +413,13 @@ static int vcn_v3_0_hw_init(void *handle) /** * vcn_v3_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v3_0_hw_fini(void *handle) +static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -443,20 +443,19 @@ static int vcn_v3_0_hw_fini(void *handle) /** * vcn_v3_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v3_0_suspend(void *handle) +static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v3_0_hw_fini(adev); + r = vcn_v3_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -464,20 +463,19 @@ static int vcn_v3_0_suspend(void *handle) /** * vcn_v3_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Resume firmware and hw init VCN block */ -static int vcn_v3_0_resume(void *handle) +static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v3_0_hw_init(adev); + r = vcn_v3_0_hw_init(ip_block); return r; } @@ -2116,9 +2114,9 @@ static bool vcn_v3_0_is_idle(void *handle) return ret; } -static int vcn_v3_0_wait_for_idle(void *handle) +static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2251,9 +2249,9 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); uint32_t inst_off; @@ -2284,9 +2282,9 @@ static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v3_0_dump_ip_state(void *handle) +static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2315,7 +2313,6 @@ static void vcn_v3_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .name = "vcn_v3_0", .early_init = vcn_v3_0_early_init, - .late_init = NULL, .sw_init = vcn_v3_0_sw_init, .sw_fini = vcn_v3_0_sw_fini, .hw_init = vcn_v3_0_hw_init, @@ -2324,10 +2321,6 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .resume = vcn_v3_0_resume, .is_idle = vcn_v3_0_is_idle, .wait_for_idle = vcn_v3_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_v3_0_set_powergating_state, .dump_ip_state = vcn_v3_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 26c6f10a8c8f..5512259cac79 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -106,14 +106,14 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev); /** * vcn_v4_0_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_early_init(void *handle) +static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; if (amdgpu_sriov_vf(adev)) { @@ -164,14 +164,14 @@ static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) /** * vcn_v4_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int vcn_v4_0_sw_init(void *handle) +static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t *ptr; @@ -253,13 +253,13 @@ static int vcn_v4_0_sw_init(void *handle) /** * vcn_v4_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_sw_fini(void *handle) +static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -294,13 +294,13 @@ static int vcn_v4_0_sw_fini(void *handle) /** * vcn_v4_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_hw_init(void *handle) +static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -341,13 +341,13 @@ static int vcn_v4_0_hw_init(void *handle) /** * vcn_v4_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_hw_fini(void *handle) +static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -372,20 +372,19 @@ static int vcn_v4_0_hw_fini(void *handle) /** * vcn_v4_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_suspend(void *handle) +static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_hw_fini(adev); + r = vcn_v4_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -393,20 +392,19 @@ static int vcn_v4_0_suspend(void *handle) /** * vcn_v4_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v4_0_resume(void *handle) +static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_hw_init(adev); + r = vcn_v4_0_hw_init(ip_block); return r; } @@ -1975,13 +1973,13 @@ static bool vcn_v4_0_is_idle(void *handle) /** * vcn_v4_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
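 *
 * The wait is a bounded register poll per VCN instance; in sketch
 * form (mirroring the vcn_v2_5 loop above, with SOC15_WAIT_ON_RREG
 * doing the actual spin-with-timeout):
 *
 *	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 *		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS,
 *					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
 *		if (ret)
 *			return ret;
 *	}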
* * Wait for VCN block idle */ -static int vcn_v4_0_wait_for_idle(void *handle) +static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2158,9 +2156,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0); uint32_t inst_off, is_powered; @@ -2190,9 +2188,9 @@ static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_dump_ip_state(void *handle) +static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -2222,7 +2220,6 @@ static void vcn_v4_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .name = "vcn_v4_0", .early_init = vcn_v4_0_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_sw_init, .sw_fini = vcn_v4_0_sw_fini, .hw_init = vcn_v4_0_hw_init, @@ -2231,10 +2228,6 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .resume = vcn_v4_0_resume, .is_idle = vcn_v4_0_is_idle, .wait_for_idle = vcn_v4_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_v4_0_set_powergating_state, .dump_ip_state = vcn_v4_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 0fda70336300..cf808a153fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -95,16 +95,23 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev, int inst_idx, bool indirect); + +static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev) +{ + return (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))); +} + /** * vcn_v4_0_3_early_init - set function pointers * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers */ -static int vcn_v4_0_3_early_init(void *handle) +static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -119,13 +126,13 @@ static int vcn_v4_0_3_early_init(void *handle) /** * vcn_v4_0_3_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
* * Load firmware and sw initialization */ -static int vcn_v4_0_3_sw_init(void *handle) +static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); @@ -212,13 +219,13 @@ static int vcn_v4_0_3_sw_init(void *handle) /** * vcn_v4_0_3_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_3_sw_fini(void *handle) +static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(&adev->ddev, &idx)) { @@ -249,13 +256,13 @@ static int vcn_v4_0_3_sw_fini(void *handle) /** * vcn_v4_0_3_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_3_hw_init(void *handle) +static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r, vcn_inst; @@ -308,13 +315,13 @@ static int vcn_v4_0_3_hw_init(void *handle) /** * vcn_v4_0_3_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_3_hw_fini(void *handle) +static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -327,20 +334,19 @@ static int vcn_v4_0_3_hw_fini(void *handle) /** * vcn_v4_0_3_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v4_0_3_suspend(void *handle) +static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = vcn_v4_0_3_hw_fini(adev); + r = vcn_v4_0_3_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -348,20 +354,19 @@ static int vcn_v4_0_3_suspend(void *handle) /** * vcn_v4_0_3_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
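One detail of the converted suspend/resume paths is worth calling out: the block-local hw_fini()/hw_init() now take the ip_block, while the shared helpers amdgpu_vcn_suspend()/amdgpu_vcn_resume() still operate on the device, so adev is dereferenced exactly once at the call site. Condensed, with foo_* as placeholder names:

static int foo_suspend(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = foo_hw_fini(ip_block);      /* stop this block's hardware */
        if (r)
                return r;

        return amdgpu_vcn_suspend(ip_block->adev);  /* save shared VCN state */
}

static int foo_resume(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = amdgpu_vcn_resume(ip_block->adev);      /* restore firmware state */
        if (r)
                return r;

        return foo_hw_init(ip_block);   /* restart the block */
}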
* * Resume firmware and hw init VCN block */ -static int vcn_v4_0_3_resume(void *handle) +static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_3_hw_init(adev); + r = vcn_v4_0_3_hw_init(ip_block); return r; } @@ -1430,8 +1435,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); @@ -1442,8 +1447,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { - /* For VF, only local offsets should be used */ - if (amdgpu_sriov_vf(ring->adev)) + /* Use normalized offsets when required */ + if (vcn_v4_0_3_normalizn_reqd(ring->adev)) reg = NORMALIZE_VCN_REG_OFFSET(reg); amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); @@ -1567,13 +1572,13 @@ static bool vcn_v4_0_3_is_idle(void *handle) /** * vcn_v4_0_3_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_3_wait_for_idle(void *handle) +static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1733,9 +1738,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; } -static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3); uint32_t inst_off, is_powered; @@ -1765,9 +1770,9 @@ static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_3_dump_ip_state(void *handle) +static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off, inst_id; @@ -1798,7 +1803,6 @@ static void vcn_v4_0_3_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .name = "vcn_v4_0_3", .early_init = vcn_v4_0_3_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_3_sw_init, .sw_fini = vcn_v4_0_3_sw_fini, .hw_init = vcn_v4_0_3_hw_init, @@ -1807,10 +1811,6 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .resume = vcn_v4_0_3_resume, .is_idle = vcn_v4_0_3_is_idle, .wait_for_idle = vcn_v4_0_3_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_v4_0_3_set_powergating_state, .dump_ip_state = vcn_v4_0_3_dump_ip_state, diff 
--git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 9d4f5352a62c..71961fb3f7ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -104,14 +104,14 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v4_0_5_early_init - set function pointers and load microcode * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v4_0_5_early_init(void *handle) +static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -124,14 +124,14 @@ static int vcn_v4_0_5_early_init(void *handle) /** * vcn_v4_0_5_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v4_0_5_sw_init(void *handle) +static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t *ptr; @@ -220,13 +220,13 @@ static int vcn_v4_0_5_sw_init(void *handle) /** * vcn_v4_0_5_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v4_0_5_sw_fini(void *handle) +static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -261,13 +261,13 @@ static int vcn_v4_0_5_sw_fini(void *handle) /** * vcn_v4_0_5_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v4_0_5_hw_init(void *handle) +static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -291,13 +291,13 @@ static int vcn_v4_0_5_hw_init(void *handle) /** * vcn_v4_0_5_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v4_0_5_hw_fini(void *handle) +static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -320,20 +320,19 @@ static int vcn_v4_0_5_hw_fini(void *handle) /** * vcn_v4_0_5_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
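The sw_fini implementations in these files share the drm_dev_enter() idiom: teardown that touches the hardware is only attempted while the DRM device is still registered, and the critical section is closed with drm_dev_exit(). A minimal sketch of the guard, with the per-block teardown elided and foo_* as a placeholder:

static int foo_sw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int idx;

        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                /* teardown that needs a live device goes here */
                drm_dev_exit(idx);
        }

        /* unplug-safe software cleanup continues unconditionally */
        return 0;
}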
* * HW fini and suspend VCN block */ -static int vcn_v4_0_5_suspend(void *handle) +static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v4_0_5_hw_fini(adev); + r = vcn_v4_0_5_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -341,20 +340,19 @@ static int vcn_v4_0_5_suspend(void *handle) /** * vcn_v4_0_5_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block */ -static int vcn_v4_0_5_resume(void *handle) +static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v4_0_5_hw_init(adev); + r = vcn_v4_0_5_hw_init(ip_block); return r; } @@ -1469,13 +1467,13 @@ static bool vcn_v4_0_5_is_idle(void *handle) /** * vcn_v4_0_5_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v4_0_5_wait_for_idle(void *handle) +static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1616,9 +1614,9 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5); uint32_t inst_off, is_powered; @@ -1648,9 +1646,9 @@ static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v4_0_5_dump_ip_state(void *handle) +static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1680,7 +1678,6 @@ static void vcn_v4_0_5_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .name = "vcn_v4_0_5", .early_init = vcn_v4_0_5_early_init, - .late_init = NULL, .sw_init = vcn_v4_0_5_sw_init, .sw_fini = vcn_v4_0_5_sw_fini, .hw_init = vcn_v4_0_5_hw_init, @@ -1689,10 +1686,6 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .resume = vcn_v4_0_5_resume, .is_idle = vcn_v4_0_5_is_idle, .wait_for_idle = vcn_v4_0_5_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_v4_0_5_set_powergating_state, .dump_ip_state = vcn_v4_0_5_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index c305386358b4..fe2cc1a80c13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -87,14 +87,14 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring); /** * vcn_v5_0_0_early_init - set function pointers and load microcode * - * 
@handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem */ -static int vcn_v5_0_0_early_init(void *handle) +static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* re-use enc ring as unified ring */ adev->vcn.num_enc_rings = 1; @@ -108,14 +108,14 @@ static int vcn_v5_0_0_early_init(void *handle) /** * vcn_v5_0_0_sw_init - sw init for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Load firmware and sw initialization */ -static int vcn_v5_0_0_sw_init(void *handle) +static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_ring *ring; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t *ptr; @@ -187,13 +187,13 @@ static int vcn_v5_0_0_sw_init(void *handle) /** * vcn_v5_0_0_sw_fini - sw fini for VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation */ -static int vcn_v5_0_0_sw_fini(void *handle) +static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, r, idx; if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -225,13 +225,13 @@ static int vcn_v5_0_0_sw_fini(void *handle) /** * vcn_v5_0_0_hw_init - start and test VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing */ -static int vcn_v5_0_0_hw_init(void *handle) +static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r; @@ -255,13 +255,13 @@ static int vcn_v5_0_0_hw_init(void *handle) /** * vcn_v5_0_0_hw_fini - stop the hardware block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more */ -static int vcn_v5_0_0_hw_fini(void *handle) +static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -284,20 +284,19 @@ static int vcn_v5_0_0_hw_fini(void *handle) /** * vcn_v5_0_0_suspend - suspend VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block */ -static int vcn_v5_0_0_suspend(void *handle) +static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vcn_v5_0_0_hw_fini(adev); + r = vcn_v5_0_0_hw_fini(ip_block); if (r) return r; - r = amdgpu_vcn_suspend(adev); + r = amdgpu_vcn_suspend(ip_block->adev); return r; } @@ -305,20 +304,19 @@ static int vcn_v5_0_0_suspend(void *handle) /** * vcn_v5_0_0_resume - resume VCN block * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
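Every VCN generation in this series keeps the same hw_fini ordering: the deferred idle worker is cancelled synchronously before the block is stopped, so the worker cannot race with teardown and touch powered-down hardware. Schematically (foo_* is a placeholder):

static int foo_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        /* ensure the idle worker is not running and cannot re-arm */
        cancel_delayed_work_sync(&adev->vcn.idle_work);

        /* per-instance stop/power-gate follows */
        return 0;
}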
* * Resume firmware and hw init VCN block */ -static int vcn_v5_0_0_resume(void *handle) +static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_vcn_resume(adev); + r = amdgpu_vcn_resume(ip_block->adev); if (r) return r; - r = vcn_v5_0_0_hw_init(adev); + r = vcn_v5_0_0_hw_init(ip_block); return r; } @@ -1196,13 +1194,13 @@ static bool vcn_v5_0_0_is_idle(void *handle) /** * vcn_v5_0_0_wait_for_idle - wait for VCN block idle * - * @handle: amdgpu_device pointer + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle */ -static int vcn_v5_0_0_wait_for_idle(void *handle) +static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1343,9 +1341,9 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev) } } -static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) +static void vcn_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0); uint32_t inst_off, is_powered; @@ -1375,9 +1373,9 @@ static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p) } } -static void vcn_v5_0_dump_ip_state(void *handle) +static void vcn_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int i, j; bool is_powered; uint32_t inst_off; @@ -1406,7 +1404,6 @@ static void vcn_v5_0_dump_ip_state(void *handle) static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .name = "vcn_v5_0_0", .early_init = vcn_v5_0_0_early_init, - .late_init = NULL, .sw_init = vcn_v5_0_0_sw_init, .sw_fini = vcn_v5_0_0_sw_fini, .hw_init = vcn_v5_0_0_hw_init, @@ -1415,10 +1412,6 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .resume = vcn_v5_0_0_resume, .is_idle = vcn_v5_0_0_is_idle, .wait_for_idle = vcn_v5_0_0_wait_for_idle, - .check_soft_reset = NULL, - .pre_soft_reset = NULL, - .soft_reset = NULL, - .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_v5_0_0_set_powergating_state, .dump_ip_state = vcn_v5_0_dump_ip_state, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index bf68e18e3824..0fedadd0a6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -472,18 +472,18 @@ static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs; } -static int vega10_ih_early_init(void *handle) +static int vega10_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega10_ih_set_interrupt_funcs(adev); vega10_ih_set_self_irq_funcs(adev); return 0; } -static int vega10_ih_sw_init(void *handle) +static int vega10_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, @@ -525,43 +525,35 @@ static int 
vega10_ih_sw_init(void *handle) return r; } -static int vega10_ih_sw_fini(void *handle) +static int vega10_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega10_ih_hw_init(void *handle) +static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_irq_init(adev); + return vega10_ih_irq_init(ip_block->adev); } -static int vega10_ih_hw_fini(void *handle) +static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega10_ih_irq_disable(adev); + vega10_ih_irq_disable(ip_block->adev); return 0; } -static int vega10_ih_suspend(void *handle) +static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_fini(adev); + return vega10_ih_hw_fini(ip_block); } -static int vega10_ih_resume(void *handle) +static int vega10_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega10_ih_hw_init(adev); + return vega10_ih_hw_init(ip_block); } static bool vega10_ih_is_idle(void *handle) @@ -570,13 +562,13 @@ static bool vega10_ih_is_idle(void *handle) return true; } -static int vega10_ih_wait_for_idle(void *handle) +static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega10_ih_soft_reset(void *handle) +static int vega10_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -633,7 +625,6 @@ static int vega10_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega10_ih_ip_funcs = { .name = "vega10_ih", .early_init = vega10_ih_early_init, - .late_init = NULL, .sw_init = vega10_ih_sw_init, .sw_fini = vega10_ih_sw_fini, .hw_init = vega10_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index ac439f0565e3..1c9aff742e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); @@ -526,18 +553,18 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev) adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs; } -static int vega20_ih_early_init(void *handle) +static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; vega20_ih_set_interrupt_funcs(adev); vega20_ih_set_self_irq_funcs(adev); return 0; } -static int vega20_ih_sw_init(void *handle) +static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; bool use_bus_addr = true; int r; @@ -586,19 +613,19 @@ static int vega20_ih_sw_init(void *handle) return r; } -static int vega20_ih_sw_fini(void *handle) +static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_irq_fini_sw(adev); return 0; } -static int vega20_ih_hw_init(void *handle) +static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block) { int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; r = vega20_ih_irq_init(adev); if (r) @@ -607,27 +634,21 @@ static int vega20_ih_hw_init(void *handle) return 0; } -static int vega20_ih_hw_fini(void *handle) +static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - vega20_ih_irq_disable(adev); + vega20_ih_irq_disable(ip_block->adev); return 0; } -static int vega20_ih_suspend(void *handle) +static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_fini(adev); + return vega20_ih_hw_fini(ip_block); } -static int vega20_ih_resume(void *handle) +static int vega20_ih_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vega20_ih_hw_init(adev); + return vega20_ih_hw_init(ip_block); } static bool vega20_ih_is_idle(void *handle) @@ -636,13 +657,13 @@ static bool vega20_ih_is_idle(void *handle) return true; } -static int vega20_ih_wait_for_idle(void *handle) +static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* todo */ return -ETIMEDOUT; } -static int vega20_ih_soft_reset(void *handle) +static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block) { /* todo */ @@ -696,7 +717,6 @@ static int vega20_ih_set_powergating_state(void *handle, const struct amd_ip_funcs vega20_ih_ip_funcs = { .name = "vega20_ih", .early_init = vega20_ih_early_init, - .late_init = NULL, .sw_init = vega20_ih_sw_init, .sw_fini = vega20_ih_sw_fini, .hw_init = vega20_ih_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 792b2eb6bbac..a83505815d39 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1455,9 +1455,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = #define CZ_REV_BRISTOL(rev) \ ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6)) -static int vi_common_early_init(void *handle) +static int vi_common_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->flags & AMD_IS_APU) { adev->smc_rreg = &cz_smc_rreg; @@ -1679,9 +1679,9 @@ static int 
vi_common_early_init(void *handle) return 0; } -static int vi_common_late_init(void *handle) +static int vi_common_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_get_irq(adev); @@ -1689,9 +1689,9 @@ static int vi_common_late_init(void *handle) return 0; } -static int vi_common_sw_init(void *handle) +static int vi_common_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (amdgpu_sriov_vf(adev)) xgpu_vi_mailbox_add_irq_id(adev); @@ -1699,14 +1699,9 @@ static int vi_common_sw_init(void *handle) return 0; } -static int vi_common_sw_fini(void *handle) +static int vi_common_hw_init(struct amdgpu_ip_block *ip_block) { - return 0; -} - -static int vi_common_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* move the golden regs per IP block */ vi_init_golden_registers(adev); @@ -1718,9 +1713,9 @@ static int vi_common_hw_init(void *handle) return 0; } -static int vi_common_hw_fini(void *handle) +static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; /* enable the doorbell aperture */ vi_enable_doorbell_aperture(adev, false); @@ -1731,18 +1726,14 @@ static int vi_common_hw_fini(void *handle) return 0; } -static int vi_common_suspend(void *handle) +static int vi_common_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_fini(adev); + return vi_common_hw_fini(ip_block); } -static int vi_common_resume(void *handle) +static int vi_common_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return vi_common_hw_init(adev); + return vi_common_hw_init(ip_block); } static bool vi_common_is_idle(void *handle) @@ -1750,16 +1741,6 @@ static bool vi_common_is_idle(void *handle) return true; } -static int vi_common_wait_for_idle(void *handle) -{ - return 0; -} - -static int vi_common_soft_reset(void *handle) -{ - return 0; -} - static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable) { @@ -2047,19 +2028,14 @@ static const struct amd_ip_funcs vi_common_ip_funcs = { .early_init = vi_common_early_init, .late_init = vi_common_late_init, .sw_init = vi_common_sw_init, - .sw_fini = vi_common_sw_fini, .hw_init = vi_common_hw_init, .hw_fini = vi_common_hw_fini, .suspend = vi_common_suspend, .resume = vi_common_resume, .is_idle = vi_common_is_idle, - .wait_for_idle = vi_common_wait_for_idle, - .soft_reset = vi_common_soft_reset, .set_clockgating_state = vi_common_set_clockgating_state, .set_powergating_state = vi_common_set_powergating_state, .get_clockgating_state = vi_common_get_clockgating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version vi_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 3e6b4736a7fe..065d87841459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -365,7 +365,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, 
filep, &q_properties, &queue_id, + err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id, NULL, NULL, NULL, &doorbell_offset_in_process); if (err != 0) goto err_create_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 48caecf7e72e..723f1220e1cc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -28,6 +28,7 @@ #include "kfd_topology.h" #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_xgmi.h" /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. * GPU processor ID are expressed with Bit[31]=1. @@ -2329,6 +2330,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, continue; if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; + if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev)) + continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + sizeof(struct crat_subtype_iolink)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index fad1c8f2bc83..956198da7859 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -534,7 +534,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) kfd->cwsr_isa = cwsr_trap_gfx11_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); } else { - BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE); + BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) + > KFD_CWSR_TMA_OFFSET); kfd->cwsr_isa = cwsr_trap_gfx12_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex); } @@ -1392,6 +1393,13 @@ void kfd_dec_compute_active(struct kfd_node *node) WARN_ONCE(count < 0, "Compute profile ref. count error"); } +static bool kfd_compute_active(struct kfd_node *node) +{ + if (atomic_read(&node->kfd->compute_profile)) + return true; + return false; +} + void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { /* @@ -1485,6 +1493,24 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) return node->dqm->ops.halt(node->dqm); } +bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) +{ + struct kfd_node *node; + + if (!kfd->init_complete) + return false; + + if (node_id >= kfd->num_nodes) { + dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", + node_id, kfd->num_nodes - 1); + return false; + } + + node = kfd->nodes[node_id]; + + return kfd_compute_active(node); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 648f40091aa3..c79fe9069e22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -202,6 +202,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r, queue_type; uint64_t wptr_addr_off; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -270,6 +272,8 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, int r; struct mes_remove_queue_input queue_input; + if (!dqm->sched_running || dqm->sched_halt) + return 0; if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; @@ -292,7 +296,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, return r; } -static int remove_all_queues_mes(struct device_queue_manager *dqm) +static int 
remove_all_kfd_queues_mes(struct device_queue_manager *dqm) { struct device_process_node *cur; struct device *dev = dqm->dev->adev->dev; @@ -319,6 +323,33 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm) return retval; } +static int add_all_kfd_queues_mes(struct device_queue_manager *dqm) +{ + struct device_process_node *cur; + struct device *dev = dqm->dev->adev->dev; + struct qcm_process_device *qpd; + struct queue *q; + int retval = 0; + + list_for_each_entry(cur, &dqm->queues, list) { + qpd = cur->qpd; + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_active) + continue; + retval = add_queue_mes(dqm, q, qpd); + if (retval) { + dev_err(dev, "%s: Failed to add queue %d for dev %d", + __func__, + q->properties.queue_id, + dqm->dev->id); + return retval; + } + } + } + + return retval; +} + static int suspend_all_queues_mes(struct device_queue_manager *dqm) { struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev; @@ -1742,7 +1773,7 @@ static int halt_cpsch(struct device_queue_manager *dqm) KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else - ret = remove_all_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); } dqm->sched_halt = true; dqm_unlock(dqm); @@ -1768,6 +1799,9 @@ static int unhalt_cpsch(struct device_queue_manager *dqm) ret = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + else + ret = add_all_kfd_queues_mes(dqm); + dqm_unlock(dqm); return ret; @@ -1867,7 +1901,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_queues_mes(dqm); + remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -2048,7 +2082,7 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, { unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies; struct device *dev = dqm->dev->adev->dev; - uint64_t *fence_addr = dqm->fence_addr; + uint64_t *fence_addr = dqm->fence_addr; while (*fence_addr != fence_value) { /* Fatal err detected, this response won't come */ @@ -2254,6 +2288,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, goto out; *dqm->fence_addr = KFD_FENCE_INIT; + mb(); pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr, KFD_FENCE_COMPLETED); /* should be timed out */ @@ -3173,7 +3208,7 @@ struct copy_context_work_handler_workarea { struct kfd_process *p; }; -static void copy_context_work_handler (struct work_struct *work) +static void copy_context_work_handler(struct work_struct *work) { struct copy_context_work_handler_workarea *workarea; struct mqd_manager *mqd_mgr; @@ -3200,6 +3235,9 @@ static void copy_context_work_handler (struct work_struct *work) struct qcm_process_device *qpd = &pdd->qpd; list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE) + continue; + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; /* We ignore the return value from get_wave_state diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 8ee3d07ffbdf..eacfeb32f35d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -445,14 +445,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", mpages, cpages, migrate.npages); - kfd_smi_event_migration_end(node, 
p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, node->id, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + 0, node->id, trigger, r); out: if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); @@ -751,14 +750,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(node, p->lead_thread->pid, - start >> PAGE_SHIFT, end >> PAGE_SHIFT, - node->id, 0, trigger); - svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); out_free: kvfree(buf); + kfd_smi_event_migration_end(node, p->lead_thread->pid, + start >> PAGE_SHIFT, end >> PAGE_SHIFT, + node->id, 0, trigger, r); out: if (!r && cpages) { mpages = cpages - upages; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 26e48fdc8728..9e5ca0b93b2a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1347,7 +1347,6 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d4aa843aacfd..87cd52cf4ee9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -271,11 +271,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; int i; - struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; + struct kfd_cu_occupancy *cu_occupancy; u32 queue_format; - memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); - pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); dev = pdd->dev; if (dev->kfd2kgd->get_cu_occupancy == NULL) @@ -293,6 +291,10 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) wave_cnt = 0; max_waves_per_cu = 0; + cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL); + if (!cu_occupancy) + return -ENOMEM; + /* * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. 
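The kfd_get_cu_occupancy() hunk above trades a large on-stack array for a heap allocation: AMDGPU_MAX_QUEUES entries of struct kfd_cu_occupancy are too big for the kernel stack, and kcalloc() also zeroes the buffer, subsuming the removed memset(). Reduced to its essentials (read_cu_occupancy() is a stand-in for the sysfs handler):

static int read_cu_occupancy(char *buffer)
{
        struct kfd_cu_occupancy *cu_occupancy;
        int cu_cnt = 0;

        cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy),
                               GFP_KERNEL);
        if (!cu_occupancy)
                return -ENOMEM;

        /* ... gather per-queue wave counts and derive cu_cnt ... */

        kfree(cu_occupancy);
        return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}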
* For AQL queues, because of cooperative dispatch we multiply the wave count @@ -318,6 +320,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; + kfree(cu_occupancy); return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt); } @@ -338,8 +341,8 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, attr_sdma); struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler; - INIT_WORK(&sdma_activity_work_handler.sdma_activity_work, - kfd_sdma_activity_worker); + INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work, + kfd_sdma_activity_worker); sdma_activity_work_handler.pdd = pdd; sdma_activity_work_handler.sdma_activity_counter = 0; @@ -347,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, schedule_work(&sdma_activity_work_handler.sdma_activity_work); flush_work(&sdma_activity_work_handler.sdma_activity_work); + destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work); return snprintf(buffer, PAGE_SIZE, "%llu\n", (sdma_activity_work_handler.sdma_activity_counter)/ @@ -850,8 +854,10 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) goto out; } - /* A prior open of /dev/kfd could have already created the process. */ - process = find_process(thread, false); + /* A prior open of /dev/kfd could have already created the process. + * find_process will increase process kref in this case + */ + process = find_process(thread, true); if (process) { pr_debug("Process already found\n"); } else { @@ -899,8 +905,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) init_waitqueue_head(&process->wait_irq_drain); } out: - if (!IS_ERR(process)) - kref_get(&process->ref); mutex_unlock(&kfd_processes_mutex); mmput(thread->mm); @@ -1186,10 +1190,8 @@ static void kfd_process_ref_release(struct kref *ref) static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm) { - int idx = srcu_read_lock(&kfd_processes_srcu); - struct kfd_process *p = find_process_by_mm(mm); - - srcu_read_unlock(&kfd_processes_srcu, idx); + /* This increments p->ref counter if kfd process p exists */ + struct kfd_process *p = kfd_lookup_process_by_mm(mm); return p ? &p->mmu_notifier : ERR_PTR(-ESRCH); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01b960b15274..c76db22a1000 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -235,7 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm) static int init_user_queue(struct process_queue_manager *pqm, struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, - struct file *f, unsigned int qid) + unsigned int qid) { int retval; @@ -300,7 +300,6 @@ cleanup: int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_node *dev, - struct file *f, struct queue_properties *properties, unsigned int *qid, const struct kfd_criu_queue_priv_data *q_data, @@ -374,7 +373,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, * allocate_sdma_queue() in create_queue() has the * corresponding check logic. 
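The kfd_procfs_show() hunk fixes a debug-objects blind spot: a work item that lives on the stack must be initialized with INIT_WORK_ONSTACK() and released with destroy_work_on_stack(), not plain INIT_WORK(), so the workqueue debugging machinery knows its lifetime is bounded by the stack frame. The pattern in isolation (names are illustrative):

struct activity_work_area {
        struct work_struct work;
        u64 counter;
};

static void activity_worker(struct work_struct *work)
{
        struct activity_work_area *area =
                container_of(work, struct activity_work_area, work);

        area->counter++;        /* stand-in for the real accounting */
}

static u64 read_activity_sync(void)
{
        struct activity_work_area area = { .counter = 0 };

        INIT_WORK_ONSTACK(&area.work, activity_worker);
        schedule_work(&area.work);
        flush_work(&area.work);         /* area outlives the work */
        destroy_work_on_stack(&area.work);

        return area.counter;
}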
*/ - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -395,7 +394,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = init_user_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -1029,8 +1028,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, print_queue_properties(&qp); - ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack, - NULL); + ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL); if (ret) { pr_err("Failed to create new queue err:%d\n", ret); goto exit; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index de8b9abf7afc..9b8169761ec5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -44,7 +44,7 @@ struct kfd_smi_client { bool suser; }; -#define MAX_KFIFO_SIZE 1024 +#define KFD_MAX_KFIFO_SIZE 8192 static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *); static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *); @@ -86,7 +86,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user, struct kfd_smi_client *client = filep->private_data; unsigned char *buf; - size = min_t(size_t, size, MAX_KFIFO_SIZE); + size = min_t(size_t, size, KFD_MAX_KFIFO_SIZE); buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -292,12 +292,13 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger) + uint32_t from, uint32_t to, uint32_t trigger, + int error_code) { kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, KFD_EVENT_FMT_MIGRATE_END( ktime_get_boottime_ns(), pid, start, end - start, - from, to, trigger)); + from, to, trigger, error_code)); } void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, @@ -354,7 +355,7 @@ int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) return -ENOMEM; INIT_LIST_HEAD(&client->list); - ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL); + ret = kfifo_alloc(&client->fifo, KFD_MAX_KFIFO_SIZE, GFP_KERNEL); if (ret) { kfree(client); return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 85010b8307f8..503bff13d815 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -44,7 +44,8 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, - uint32_t from, uint32_t to, uint32_t trigger); + uint32_t from, uint32_t to, uint32_t trigger, + int error_code); void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger); void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 1893c27746a5..3e2911895c74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3111,8 +3111,6 @@ retry_write_locked: start = max_t(unsigned long, ALIGN_DOWN(addr, 
size), prange->start); last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); if (prange->actual_loc != 0 || best_loc != 0) { - migration = true; - if (best_loc) { r = svm_migrate_to_vram(prange, best_loc, start, last, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); @@ -3135,7 +3133,9 @@ retry_write_locked: if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", r, svms, start, last); - goto out_unlock_range; + goto out_migrate_fail; + } else { + migration = true; } } @@ -3145,6 +3145,7 @@ retry_write_locked: pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", r, svms, start, last); +out_migrate_fail: kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3871591c9aec..9476e30d6baa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1998,6 +1998,8 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + + dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8d97f17ffe66..f0a6816709ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -320,18 +320,18 @@ static bool dm_is_idle(void *handle) return true; } -static int dm_wait_for_idle(void *handle) +static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; } -static bool dm_check_soft_reset(void *handle) +static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) { return false; } -static int dm_soft_reset(void *handle) +static int dm_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; @@ -968,7 +968,7 @@ static int dm_set_powergating_state(void *handle, } /* Prototypes of private functions */ -static int dm_early_init(void *handle); +static int dm_early_init(struct amdgpu_ip_block *ip_block); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1307,6 +1307,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) DRM_INFO("DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); + /* Keeping sanity checks off if + * DCN31 >= 4.0.59.0 + * DCN314 >= 8.0.16.0 + * Otherwise, turn on sanity checks + */ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) + adev->dm.dc->debug.sanity_checks = true; + break; + case IP_VERSION(3, 1, 4): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) + adev->dm.dc->debug.sanity_checks = true; + break; + default: + break; + } + return 0; } @@ -1696,6 +1719,26 @@ dm_allocate_gpu_mem( return da->cpu_ptr; } +void +dm_free_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, 
&adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } + +} + static enum dmub_status dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, enum dmub_gpint_command command_code, @@ -1762,16 +1805,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done in dm_sw_fini() */ - return NULL; + goto free_bb; } /* Now ask DMUB to copy the bb */ ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); if (ret != DMUB_STATUS_OK) - return NULL; + goto free_bb; return bb; + +free_bb: + dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); + return NULL; + } static enum dmub_ips_disable_type dm_get_default_ips_mode( @@ -1886,7 +1933,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) else init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); } else { - init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) + init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); + else + init_data.flags.gpu_vm_support = + (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; @@ -2115,9 +2166,9 @@ error: return -EINVAL; } -static int amdgpu_dm_early_fini(void *handle) +static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_audio_fini(adev); @@ -2509,9 +2560,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } -static int dm_sw_init(void *handle) +static int dm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; adev->dm.cgs_device = amdgpu_cgs_create_device(adev); @@ -2531,9 +2582,9 @@ static int dm_sw_init(void *handle) return load_dmcu_fw(adev); } -static int dm_sw_fini(void *handle) +static int dm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dal_allocation *da; list_for_each_entry(da, &adev->dm.da_list, list) { @@ -2541,11 +2592,11 @@ static int dm_sw_fini(void *handle) amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); list_del(&da->list); kfree(da); + adev->dm.bb_from_dmub = NULL; break; } } - adev->dm.bb_from_dmub = NULL; kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; @@ -2598,9 +2649,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) return ret; } -static int dm_late_init(void *handle) +static int dm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; @@ -2790,7 +2841,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the &struct amdgpu_display_manager device. 
This involves calling * the initializers of each DM component, then populating the struct with them. @@ -2808,9 +2859,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * - Vblank support * - Debug FS entries, if enabled */ -static int dm_hw_init(void *handle) +static int dm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; /* Create DAL display manager */ @@ -2824,15 +2875,15 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. Also flush IRQ workqueues and disable them. */ -static int dm_hw_fini(void *handle) +static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_hpd_fini(adev); @@ -2936,9 +2987,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_suspend(void *handle) +static int dm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; @@ -3125,9 +3176,9 @@ cleanup: kfree(bundle); } -static int dm_resume(void *handle) +static int dm_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; @@ -3142,8 +3193,7 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j, ret; - bool need_hotplug = false; + int i, r, j; struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { @@ -3332,23 +3382,16 @@ static int dm_resume(void *handle) aconnector->mst_root) continue; - ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); - - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); - if (need_hotplug) - drm_kms_helper_hotplug_event(ddev); - amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -3379,8 +3422,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -3495,7 +3536,7 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, - aconnector->edid); + aconnector->drm_edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { @@ -3554,18 +3595,19 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; 
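The EDID hunks that follow are one migration seen from several angles: the connector code stops caching a raw struct edid * and switches to the opaque, refcounted struct drm_edid, driven through the drm_edid_* helpers, which keep the override/firmware/DDC lookup order the deleted comments spelled out for drm_get_edid(). The lifecycle in miniature, reusing the connector/aconnector variables from the surrounding driver context:

        const struct drm_edid *drm_edid;

        /* read (override, then firmware, then DDC) and attach */
        drm_edid = drm_edid_read(connector);
        drm_edid_connector_update(connector, drm_edid);
        if (!drm_edid)
                return;
        aconnector->drm_edid = drm_edid;

        /* ... and on disconnect ... */
        drm_edid_free(aconnector->drm_edid);
        aconnector->drm_edid = NULL;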
dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; if (aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); } if (!aconnector->timing_requested) { @@ -3576,17 +3618,16 @@ void amdgpu_dm_update_connector_after_detect( "failed to create aconnector->requested_timing\n"); } - drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ @@ -4622,7 +4663,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } @@ -5177,15 +5223,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? 
count : 0; @@ -5257,9 +5308,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) return r; } -static int dm_early_init(void *handle) +static int dm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); @@ -7122,32 +7173,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link && dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - aconnector->edid = edid; - + aconnector->drm_edid = drm_edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); - memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, @@ -7177,36 +7220,26 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; - struct i2c_adapter *ddc; - - if (dc_link->aux_mode) - ddc = &aconnector->dm_dp_aux.aux.ddc; - else - ddc = &aconnector->i2c->base; + const struct drm_edid *drm_edid; + const struct edid *edid; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { + drm_edid = drm_edid_read(connector); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; } - if (drm_detect_hdmi_monitor(edid)) + if (connector->display_info.is_hdmi) init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -7313,10 +7346,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; if (!dm_state) return NULL; + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + bpc_limit = 8; + do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, @@ -7337,11 +7375,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -7349,10 +7388,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); + if ((dc_result == DC_FAIL_ENC_VALIDATE || + dc_result == DC_EXCEED_DONGLE_CAP) && + !aconnector->force_yuv420_output) { + DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", + __func__, __LINE__); aconnector->force_yuv420_output = true; stream = create_validate_stream_for_sink(aconnector, drm_mode, @@ -7893,16 +7935,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -7916,10 +7958,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. 
*/ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } } @@ -8015,12 +8057,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!(amdgpu_freesync_vid_mode && drm_edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8033,24 +8075,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); @@ -9580,7 +9622,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, WARN_ON(!dc_commit_streams(dm->dc, &params)); /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); @@ -10124,6 +10166,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -12024,7 +12068,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, } static void parse_edid_displayid_vrr(struct drm_connector *connector, - struct edid *edid) + const struct edid *edid) { u8 *edid_ext = NULL; int i; @@ -12067,7 +12111,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector, } static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12102,7 +12146,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -12136,7 +12181,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * 
amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -12144,19 +12189,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; @@ -12169,13 +12211,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -12186,6 +12228,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + /* Some eDP panels only have the refresh rate range info in DisplayID */ if ((connector->display_info.monitor_range.min_vfreq == 0 || connector->display_info.monitor_range.max_vfreq == 0)) @@ -12193,67 +12237,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - bool edid_check_required = false; - - if (amdgpu_dm_connector->dc_link && - amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { - if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) - freesync_capable = true; - } else { - edid_check_required = edid->version > 1 || - (edid->version == 1 && - edid->revision > 1); - } - } - - if (edid_check_required) { - for (i = 0; i < 4; i++) { - - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. 
- * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - - if (edid->revision >= 4) { - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) - connector->display_info.monitor_range.min_vfreq += 255; - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) - connector->display_info.monitor_range.max_vfreq += 255; - } - - amdgpu_dm_connector->min_vfreq = - connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = - connector->display_info.monitor_range.max_vfreq; - - break; - } - - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - - freesync_capable = true; - } - } + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { @@ -12262,12 +12249,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 90dfffec33cf..6464a8378387 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -673,7 +673,7 @@ struct amdgpu_dm_connector { /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ - struct edid *edid; + const struct drm_edid *drm_edid; /* shared with amdgpu */ struct amdgpu_hpd hpd; @@ -951,7 +951,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); @@ -1004,6 +1004,9 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev, enum dc_gpu_mem_alloc_type type, size_t size, long long *addr); +void dm_free_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *addr); bool amdgpu_dm_is_headless(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 288be19db7c1..64a041c2af05 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -35,8 +35,8 @@ #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" -#define HPD_DETECTION_PERIOD_uS 5000000 -#define HPD_DETECTION_TIME_uS 1000 +#define HPD_DETECTION_PERIOD_uS 2000000 +#define HPD_DETECTION_TIME_uS 100000 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { @@ -154,6 
+154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( amdgpu_dm_psr_enable(vblank_work->stream); if (dm->idle_workqueue && + (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && dm->dc->idle_optimizations_allowed && dm->idle_workqueue->enable && !dm->idle_workqueue->running) @@ -251,10 +252,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - if (dm->active_vblank_irq_count > 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n"); + if (dm->active_vblank_irq_count > 0) dc_allow_idle_optimizations(dm->dc, false); - } /* * Control PSR based on vblank requirements from OS @@ -272,10 +271,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) vblank_work->acrtc->dm_irq_params.allow_sr_entry); } - if (dm->active_vblank_irq_count == 0) { - DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n"); + if (dm->active_vblank_irq_count == 0) dc_allow_idle_optimizations(dm->dc, true); - } mutex_unlock(&dm->dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index db56b0aa5454..6a97bb2d9160 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1529,7 +1529,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1543,8 +1542,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1558,10 +1555,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_clock_en); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1719,7 +1715,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1733,8 +1728,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1748,10 +1741,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1907,7 +1899,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1921,8 +1912,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char 
__user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -1936,10 +1925,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2091,7 +2079,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2105,8 +2092,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2120,10 +2105,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2270,7 +2254,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2284,8 +2267,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2299,10 +2280,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2328,7 +2308,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2342,8 +2321,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2357,10 +2334,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2401,7 +2377,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2415,8 
+2390,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2430,10 +2403,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_chunk_size); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2474,7 +2446,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2488,8 +2459,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && @@ -2503,10 +2472,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index eea317dcbe8c..b0fea0856866 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -23,6 +23,8 @@ * */ +#include <acpi/video.h> + #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> @@ -642,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + if (ret < 0) + goto err; } // write rc offset @@ -650,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + if (ret < 0) + goto err; // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + if (ret < 0) + goto err; // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); - - if (ret < 0) { - DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); - return false; - } + if (ret < 0) + goto err; // poll until active is 0 for (i = 0; i < 10; i++) { @@ -686,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dbg_dp(aux->drm_dev, "success = %d\n", success); return success; + +err: + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) @@ -892,6 +901,60 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) return dp_sink_present; } +static int +dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_connector *connector = data; 
+ struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); + unsigned char start = block * EDID_LENGTH; + void *edid; + int r; + + if (!acpidev) + return -ENODEV; + + /* fetch the entire edid from BIOS */ + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid); + if (r < 0) { + drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); + return r; + } + if (len > r || start > r || start + len > r) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, edid + start, len); + r = 0; + +cleanup: + kfree(edid); + + return r; +} + +static const struct drm_edid * +dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + + if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) + return NULL; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + break; + default: + return NULL; + } + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, @@ -902,7 +965,8 @@ enum dc_edid_status dm_helpers_read_local_edid( struct i2c_adapter *ddc; int retry = 3; enum dc_edid_status edid_status; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -913,26 +977,31 @@ enum dc_edid_status dm_helpers_read_local_edid( * do check sum and retry to make sure read correct edid. */ do { - - edid = drm_get_edid(&aconnector->base, ddc); + drm_edid = dm_helpers_read_acpi_edid(aconnector); + if (drm_edid) + drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); - if (!edid && connector->edid_corrupt) { + if (!drm_edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } - if (!edid) + if (!drm_edid) return EDID_NO_RESPONSE; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ - kfree(edid); + drm_edid_free(drm_edid); edid_status = dm_helpers_parse_edid_caps( link, @@ -1055,17 +1124,8 @@ void dm_helpers_free_gpu_mem( void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - - /* walk the da list in DM */ - list_for_each_entry(da, &adev->dm.da_list, list) { - if (pvMem == da->cpu_ptr) { - amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); - list_del(&da->list); - kfree(da); - break; - } - } + + dm_free_gpu_mem(adev, type, pvMem); } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) @@ -1314,4 +1374,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream { // TODO return false; -}
\ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a08e8a0b696c..6e4359490613 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) dc_sink_release(aconnector->dc_sink); } - kfree(aconnector->edid); + drm_edid_free(aconnector->drm_edid); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->mst_output_port); @@ -182,7 +182,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; } @@ -302,16 +302,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector) return drm_add_edid_modes(connector, NULL); - if (!aconnector->edid) { - struct edid *edid; + if (!aconnector->drm_edid) { + const struct drm_edid *drm_edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); + drm_edid = drm_dp_mst_edid_read(connector, + &aconnector->mst_root->mst_mgr, + aconnector->mst_output_port); - if (!edid) { + if (!drm_edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); - drm_connector_update_edid_property( + drm_edid_connector_update( &aconnector->base, NULL); @@ -345,7 +347,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } @@ -360,10 +362,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + const struct edid *edid; + + edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw() dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { @@ -405,7 +410,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); + connector, aconnector->drm_edid); #if defined(CONFIG_DRM_AMD_DC_FP) if (!validate_dsc_caps_on_connector(aconnector)) @@ -419,10 +424,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } } - drm_connector_update_edid_property( - &aconnector->base, aconnector->edid); + drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); - ret = drm_add_edid_modes(connector, aconnector->edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -500,7 +504,7 @@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + aconnector->drm_edid = NULL; aconnector->dsc_aux = NULL; port->passthrough_aux = NULL; @@ -1120,6 +1124,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int i, k, ret; bool debugfs_overwrite = false; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1127,7 
+1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state); /* Set up params */ - DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); + DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -1143,6 +1148,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector->mst_output_port) continue; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", + __func__, __LINE__, stream); + continue; + } + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; @@ -1175,6 +1188,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); + if (count == 0) { ASSERT(0); return 0; @@ -1302,7 +1317,7 @@ static bool is_dsc_need_re_compute( continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; - if (!aconnector || !aconnector->dsc_aux) + if (!aconnector) continue; stream_on_link[new_stream_on_link_num] = aconnector; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index adc710fe4a45..8d2cf95ae739 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -78,10 +78,3 @@ void bios_set_scratch_critical_state( uint32_t critial_state = state ? 1 : 0; REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state); } - -uint32_t bios_get_vga_enabled_displays( - struct dc_bios *bios) -{ - return REG_READ(BIOS_SCRATCH_3) & 0XFFFF; -} - diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index e1b4a40a353d..ab162f2fe577 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -34,7 +34,6 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, bool bios_is_accelerated_mode(struct dc_bios *bios); void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); -uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index e93df3d6222e..bc123f1884da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -50,12 +50,13 @@ #include "link.h" #include "logger_types.h" + + +#include "yellow_carp_offset.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "yellow_carp_offset.h" - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 29eff386505a..91d872d6d392 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -53,9 +53,6 @@ #include "logger_types.h" -#undef DC_LOGGER -#define DC_LOGGER \ - clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 @@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; +#undef DC_LOGGER +#define DC_LOGGER \ + clk_mgr->base.base.ctx->logger #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index b46a3afe48ca..b77333817f18 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -55,6 +55,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger + #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -132,6 +133,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dccg *dccg = clk_mgr_internal->dccg; struct pipe_ctx *pipe = safe_to_lower ? &context->res_ctx.pipe_ctx[i] : &dc->current_state->res_ctx.pipe_ctx[i]; @@ -148,8 +151,21 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state * new_pipe->stream_res.stream_enc && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || - !pipe->stream->link_enc) && !stream_changed_otg_dig_on) { + + bool has_active_hpo = false; + + if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) { + has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && + dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe); + + } + + + if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) && + (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) { + + /* This w/a should not trigger when we have a dig active */ if (disable) { if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) @@ -257,11 +273,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 }; int i; - for (i = 0; i < context->stream_count; ++i) { const struct dc_stream_state *stream = context->streams[i]; const struct dc_link *link = stream->link; - uint8_t lowest_dpia_index = 0, hr_index = 0; + uint8_t lowest_dpia_index = 0; + unsigned int hr_index = 0; if (!link) continue; @@ -271,6 +287,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_ continue; hr_index = (link->link_index - lowest_dpia_index) / 2; + if (hr_index >= MAX_HOST_ROUTERS_NUM) + continue; host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing( &stream->timing, dc_link_get_highest_encoding_format(link)); } @@ -975,11 +993,8 @@ static void 
dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - bool ips_supported = true; - - ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false; - return ips_supported; + return dcn35_smu_get_ips_supported(clk_mgr) ? true : false; } static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a88f1b6ea64c..7872c6cabb14 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -621,8 +621,8 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream, * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. - * @enable: Enable CRC if true, disable otherwise. * @crc_window: CRC window (x/y start/end) information + * @enable: Enable CRC if true, disable otherwise. * @continuous: Capture CRC on every frame if true. Otherwise, only capture * once. * @@ -1157,6 +1157,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else { if (dc->ctx->dce_version < DCN_VERSION_2_0) color_space_to_black_color( @@ -1233,16 +1235,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) */ if (is_phantom) { if (tg->funcs->enable_crtc) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - - if (old_paired_stream) { - main_pipe_width = old_paired_stream->dst.width; - main_pipe_height = old_paired_stream->dst.height; - } - - if (dc->hwss.blank_phantom) - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); tg->funcs->enable_crtc(tg); } } @@ -1437,6 +1431,7 @@ void dc_hardware_init(struct dc *dc) detect_edp_presence(dc); if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) dc->hwss.init_hw(dc); + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); } void dc_init_callbacks(struct dc *dc, @@ -1876,6 +1871,41 @@ void dc_z10_save_init(struct dc *dc) dc->hwss.z10_save_init(dc); } +/* Set a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory + * Prevents over allocation of DET during unlock process + * e.g. 
2 pipe config with different streams with a max of 20 DET segments + * Before: After: + * - Pipe0: 10 DET segments - Pipe0: 12 DET segments + * - Pipe1: 10 DET segments - Pipe1: 8 DET segments + * If Pipe0 gets updated first, 22 DET segments will be allocated + */ +static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) +{ + unsigned int i = 0; + struct pipe_ctx *pipe = NULL; + struct timing_generator *tg = NULL; + + if (!dc->config.set_pipe_unlock_order) + return; + + memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + + if (resource_calculate_det_for_stream(context, pipe) < + resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { + dc->scratch.pipes_to_unlock_first[i] = true; + } + } +} + /** * dc_commit_state_no_check - Apply context to the hardware * @@ -1974,6 +2004,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; } + determine_pipe_unlock_order(dc, context); /* Program all planes within new context*/ if (dc->res_pool->funcs->prepare_mcache_programming) dc->res_pool->funcs->prepare_mcache_programming(dc, context); @@ -2156,6 +2187,14 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; res = dc_validate_with_context(dc, set, params->stream_count, context, false); + + /* + * Only update link encoder to stream assignment after bandwidth validation passed. 
+ */ + if (res == DC_OK && dc->res_pool->funcs->link_encs_assign) + dc->res_pool->funcs->link_encs_assign( + dc, context, context->streams, context->stream_count); + if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2477,41 +2516,35 @@ static enum surface_update_type get_scaling_info_update_type( if (!u->scaling_info) return UPDATE_TYPE_FAST; - if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width + if (u->scaling_info->src_rect.width != u->surface->src_rect.width + || u->scaling_info->src_rect.height != u->surface->src_rect.height + || u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->clip_rect.width != u->surface->clip_rect.width + || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->scaling_quality.integer_scaling != - u->surface->scaling_quality.integer_scaling - ) { + u->surface->scaling_quality.integer_scaling) { update_flags->bits.scaling_change = 1; + if (u->scaling_info->src_rect.width > u->surface->src_rect.width + || u->scaling_info->src_rect.height > u->surface->src_rect.height) + /* Making src rect bigger requires a bandwidth change */ + update_flags->bits.clock_change = 1; + if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) && (u->scaling_info->dst_rect.width < u->surface->src_rect.width || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - } - - if (u->scaling_info->src_rect.width != u->surface->src_rect.width - || u->scaling_info->src_rect.height != u->surface->src_rect.height) { - update_flags->bits.scaling_change = 1; - if (u->scaling_info->src_rect.width > u->surface->src_rect.width - || u->scaling_info->src_rect.height > u->surface->src_rect.height) - /* Making src rect bigger requires a bandwidth change */ - update_flags->bits.clock_change = 1; + if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || + u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) + /* Changing clip size of a large surface may result in MPC slice count change */ + update_flags->bits.bandwidth_change = 1; } - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && - (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || - u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) - /* Changing clip size of a large surface may result in MPC slice count change */ - update_flags->bits.bandwidth_change = 1; - - if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || - u->scaling_info->clip_rect.height != u->surface->clip_rect.height) - update_flags->bits.clip_size_change = 1; - if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2520,13 +2553,13 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) update_flags->bits.position_change = 1; + /* process every update flag before returning */ if (update_flags->bits.clock_change || update_flags->bits.bandwidth_change || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.position_change || - 
update_flags->bits.clip_size_change) + if (update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -2617,7 +2650,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, type); } - if (update_flags->bits.lut_3d) { + if (update_flags->bits.lut_3d && + u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -2637,6 +2671,29 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return overall_type; } +/* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't + * while both planes are flip_immediate + */ +static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) +{ + bool has_flip_immediate_plane = false; + int i; + + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) { + has_flip_immediate_plane = true; + break; + } + } + + if (has_flip_immediate_plane && surface_count > 1) { + for (i = 0; i < surface_count; i++) { + if (updates[i].surface->flip_immediate) + updates[i].surface->update_flags.bits.addr_update = 1; + } + } +} + static enum surface_update_type check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, @@ -2699,6 +2756,9 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->scaler_sharpener_update) su_flags->bits.scaler_sharpener = 1; + if (stream_update->sharpening_required) + su_flags->bits.sharpening_required = 1; + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2870,10 +2930,20 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper) + if (srf_update->cm2_params) { + surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; + surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; + surface->mcm_luts = srf_update->cm2_params->cm2_luts; + } + + if (srf_update->func_shaper) { memcpy(&surface->in_shaper_func, srf_update->func_shaper, sizeof(surface->in_shaper_func)); + if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) + surface->mcm_luts.shaper = &surface->in_shaper_func; + } + if (srf_update->lut3d_func) memcpy(&surface->lut3d_func, srf_update->lut3d_func, sizeof(surface->lut3d_func)); @@ -2886,10 +2956,17 @@ static void copy_surface_update_to_plane( surface->sdr_white_level_nits = srf_update->sdr_white_level_nits; - if (srf_update->blend_tf) + if (srf_update->blend_tf) { memcpy(&surface->blend_tf, srf_update->blend_tf, sizeof(surface->blend_tf)); + if (surface->mcm_lut1d_enable) + surface->mcm_luts.lut1d_func = &surface->blend_tf; + } + + if (srf_update->cm2_params || srf_update->blend_tf) + surface->lut_bank_a = !surface->lut_bank_a; + if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -2901,11 +2978,7 @@ static void copy_surface_update_to_plane( if (srf_update->gamut_remap_matrix) surface->gamut_remap_matrix = *srf_update->gamut_remap_matrix; - if (srf_update->cm2_params) { - surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; - surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; - surface->mcm_luts = srf_update->cm2_params->cm2_luts; - } + if 
(srf_update->cursor_csc_color_matrix) surface->cursor_csc_color_matrix = *srf_update->cursor_csc_color_matrix; @@ -3037,6 +3110,8 @@ static void copy_stream_update_to_stream(struct dc *dc, } if (update->scaler_sharpener_update) stream->scaler_sharpener_update = *update->scaler_sharpener_update; + if (update->sharpening_required) + stream->sharpening_required = *update->sharpening_required; } static void backup_planes_and_stream_state( @@ -3153,6 +3228,11 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. + */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type == UPDATE_TYPE_FULL) backup_planes_and_stream_state(&dc->scratch.current_state, stream); @@ -3225,8 +3305,7 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type != UPDATE_TYPE_MED) continue; - if (surface->update_flags.bits.clip_size_change || - surface->update_flags.bits.position_change) { + if (surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -3625,6 +3704,10 @@ static void commit_planes_for_stream_fast(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; bool should_offload_fams2_flip = false; + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); + + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); if (dc->debug.fams2_config.bits.enable && dc->debug.fams2_config.bits.enable_offload_flip && @@ -3677,13 +3760,14 @@ static void commit_planes_for_stream_fast(struct dc *dc, if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; + pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ pipe_ctx->plane_state->triplebuffer_flips = true; } } @@ -3742,6 +3826,8 @@ static void commit_planes_for_stream(struct dc *dc, bool subvp_curr_use = false; uint8_t current_stream_mask = 0; + if (should_lock_all_pipes) + determine_pipe_unlock_order(dc, context); // Once we apply the new subvp context to hardware it won't be in the // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context @@ -3749,7 +3835,7 @@ static void commit_planes_for_stream(struct dc *dc, dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); - if (update_type == UPDATE_TYPE_FULL) + if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) hwss_process_outstanding_hw_updates(dc, dc->current_state); for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -3776,6 +3862,9 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + if (update_type == UPDATE_TYPE_FULL) 
+ hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); + top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); @@ -3920,19 +4009,20 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; if (!pipe_ctx->plane_state) continue; - if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) continue; pipe_ctx->plane_state->triplebuffer_flips = false; if (update_type == UPDATE_TYPE_FAST && - dc->hwss.program_triplebuffer != NULL && - !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { - /*triple buffer for VUpdate only*/ - pipe_ctx->plane_state->triplebuffer_flips = true; + dc->hwss.program_triplebuffer != NULL && + !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { + /*triple buffer for VUpdate only*/ + pipe_ctx->plane_state->triplebuffer_flips = true; } } if (update_type == UPDATE_TYPE_FULL) { /* force vsync flip when reconfiguring pipes to prevent underflow */ plane_state->flip_immediate = false; + plane_state->triplebuffer_flips = false; } } @@ -3953,7 +4043,6 @@ static void commit_planes_for_stream(struct dc *dc, continue; ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( @@ -4028,7 +4117,7 @@ static void commit_planes_for_stream(struct dc *dc, /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { - /*only enable triplebuffer for fast_update*/ + /*only enable triplebuffer for fast_update*/ dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } @@ -4777,6 +4866,11 @@ static bool update_planes_and_stream_v1(struct dc *dc, update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. + * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip + * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. + */ + force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); if (update_type >= UPDATE_TYPE_FULL) { @@ -5338,8 +5432,10 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { - if (dc->debug.disable_idle_power_optimizations) + if (dc->debug.disable_idle_power_optimizations) { + DC_LOG_DEBUG("%s: disabled\n", __func__); return; + } if (allow != dc->idle_optimizations_allowed) DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, @@ -5356,8 +5452,10 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const return; if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && - dc->hwss.apply_idle_power_optimizations(dc, allow)) + dc->hwss.apply_idle_power_optimizations(dc, allow)) { dc->idle_optimizations_allowed = allow; + DC_LOG_DEBUG("%s: %s\n", __func__, allow ? 
"enabled" : "disabled"); + } } void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) @@ -5999,7 +6097,12 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state { struct dc_power_profile profile = { 0 }; - profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support; + if (!context || !context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) + return profile; + struct dc *dc = context->clk_mgr->ctx->dc; + + if (dc->res_pool->funcs->get_power_profile) + profile.power_level = dc->res_pool->funcs->get_power_profile(context); return profile; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 801cdbc8117d..af1ea5792560 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -46,11 +46,6 @@ DC_LOG_IF_TRACE(__VA_ARGS__); \ } while (0) -#define TIMING_TRACE(...) do {\ - if (dc->debug.timing_trace) \ - DC_LOG_SYNC(__VA_ARGS__); \ -} while (0) - #define CLOCK_TRACE(...) do {\ if (dc->debug.clock_trace) \ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \ @@ -306,43 +301,6 @@ void post_surface_trace(struct dc *dc) } -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx) -{ - int i; - int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; - struct crtc_position position; - unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; - DC_LOGGER_INIT(dc->ctx->logger); - - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - /* get_position() returns CRTC vertical/horizontal counter - * hence not applicable for underlay pipe - */ - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); - h_pos[i] = position.horizontal_count; - v_pos[i] = position.vertical_count; - } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - - if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) - continue; - - TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", - pipe_ctx->stream_res.tg->inst, - pipe_ctx->stream->timing.h_total, - pipe_ctx->stream->timing.v_total, - h_pos[i], v_pos[i]); - } -} - void context_clock_trace( struct dc *dc, struct dc_state *context) @@ -434,3 +392,43 @@ char *dc_status_to_str(enum dc_status status) return "Unexpected status error"; } + +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding) +{ + switch (pixel_encoding) { + case PIXEL_ENCODING_RGB: + return "RGB"; + case PIXEL_ENCODING_YCBCR422: + return "YUV422"; + case PIXEL_ENCODING_YCBCR444: + return "YUV444"; + case PIXEL_ENCODING_YCBCR420: + return "YUV420"; + default: + return "Unknown"; + } +} + +char *dc_color_depth_to_str(enum dc_color_depth color_depth) +{ + switch (color_depth) { + case COLOR_DEPTH_666: + return "6-bpc"; + case COLOR_DEPTH_888: + return "8-bpc"; + case COLOR_DEPTH_101010: + return "10-bpc"; + case COLOR_DEPTH_121212: + return "12-bpc"; + case COLOR_DEPTH_141414: + return "14-bpc"; + case COLOR_DEPTH_161616: + return "16-bpc"; + case COLOR_DEPTH_999: + return "9-bpc"; + case COLOR_DEPTH_111111: + return "11-bpc"; + default: + return "Unknown"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7ee2be8f82c4..0419ee7f22a5 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -312,11 +312,11 @@ void get_mpctree_visual_confirm_color( { const struct tg_color pipe_colors[6] = { {MAX_TG_COLOR_VALUE, 0, 0}, /* red */ - {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */ {0, MAX_TG_COLOR_VALUE, 0}, /* green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */ - {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */ }; struct pipe_ctx *top_pipe = pipe_ctx; @@ -497,6 +497,23 @@ void get_mclk_switch_visual_confirm_color( } } +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + if (pipe_ctx->stream && pipe_ctx->stream->cursor_position.enable) { + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } else { + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + } +} + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, @@ -1071,8 +1088,13 @@ void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_con if (!pipe_ctx->stream) continue; - if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) - pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); + /* For full update we must wait for all double buffer updates, not just DRR updates. This + * is particularly important for minimal transitions. Only check for OTG_MASTER pipes, + * as non-OTG Master pipes share the same OTG as the OTG master. + */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && dc->hwss.wait_for_all_pending_updates) { + dc->hwss.wait_for_all_pending_updates(pipe_ctx); + } hubp = pipe_ctx->plane_res.hubp; if (!hubp) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index dfdfe22d9e85..457d60eeb486 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -430,11 +430,10 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, } bool dc_link_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { return link->dc->link_srv->edp_set_backlight_level(link, - backlight_pwm_u16_16, frame_ramp); + backlight_level_params); } bool dc_link_set_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c7599c40d4be..33125b95c3a1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -765,25 +765,6 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } -/* - * This is a preliminary vp size calculation to allow us to check taps support. - * The result is completely overridden afterwards. 
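get_cursor_visual_confirm_color() above factors the cursor debug color out of the stream code: red while the hardware cursor is enabled on the pipe's stream, blue otherwise. A usage sketch for repainting every pipe of one stream, mirroring the dc_stream.c hunk later in this patch; the surrounding dc/stream variables are assumed caller context:

	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream != stream)
			continue;

		/* red = cursor enabled, blue = cursor disabled */
		get_cursor_visual_confirm_color(pipe_ctx, &pipe_ctx->visual_confirm_color);
	}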
- */ -static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) -{ - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - - data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width)); - data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height)); - data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width)); - data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height)); - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { - swap(data->viewport.width, data->viewport.height); - swap(data->viewport_c.width, data->viewport_c.height); - } -} - static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) { struct rect rec; @@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx); + struct scaling_taps temp = {0}; bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1525,8 +1507,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) calculate_recout(pipe_ctx); /* depends on pixel format */ calculate_scaling_ratios(pipe_ctx); - /* depends on scaling ratios and recout, does not calculate offset yet */ - calculate_viewport_size(pipe_ctx); /* * LB calculations depend on vp size, h/v_active and scaling ratios @@ -1547,6 +1527,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; + // get TAP value with 100x100 dummy data for max scaling quality, override + // if a new scaling quality is required + pipe_ctx->plane_res.scl_data.viewport.width = 100; + pipe_ctx->plane_res.scl_data.viewport.height = 100; + pipe_ctx->plane_res.scl_data.viewport_c.width = 100; + pipe_ctx->plane_res.scl_data.viewport_c.height = 100; + if (pipe_ctx->plane_res.xfm != NULL) + res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( + pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + if (pipe_ctx->plane_res.dpp != NULL) + res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + temp = pipe_ctx->plane_res.scl_data.taps; + + calculate_inits_and_viewports(pipe_ctx); + if (pipe_ctx->plane_res.xfm != NULL) res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); @@ -1573,11 +1571,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) &plane_state->scaling_quality); } - /* - * Depends on recout, scaling ratios, h_active and taps - * May need to re-check lb size after this in some obscure scenario - */ - if (res) + if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps || + pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps || + pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c || + pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c)) calculate_inits_and_viewports(pipe_ctx); /* @@ -4094,14 +4091,6 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = 
DC_FAIL_BANDWIDTH_VALIDATE; - /* - * Only update link encoder to stream assignment after bandwidth validation passed. - * TODO: Split out assignment and validation. - */ - if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); - return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 9a406d74c0dd..55dc482d9b36 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -292,7 +292,9 @@ bool dc_stream_set_cursor_attributes( * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz */ - if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) { + if (dc->debug.allow_sw_cursor_fallback && + attributes->height * attributes->width * 4 > 16384 && + !stream->hw_cursor_req) { if (check_subvp_sw_cursor_fallback_req(dc, stream)) return false; } @@ -421,7 +423,6 @@ bool dc_stream_program_cursor_position( /* apply/update visual confirm */ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) { /* update software state */ - uint32_t color_value = MAX_TG_COLOR_VALUE; int i; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -429,15 +430,7 @@ bool dc_stream_program_cursor_position( /* adjust visual confirm color for all pipes with current stream */ if (stream == pipe_ctx->stream) { - if (stream->cursor_position.enable) { - pipe_ctx->visual_confirm_color.color_r_cr = color_value; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = 0; - } else { - pipe_ctx->visual_confirm_color.color_r_cr = 0; - pipe_ctx->visual_confirm_color.color_g_y = 0; - pipe_ctx->visual_confirm_color.color_b_cb = color_value; - } + get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); /* programming hardware */ if (pipe_ctx->plane_state) @@ -819,12 +812,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->dst.height, stream->output_color_space); DC_LOG_DC( - "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", + "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n", stream->timing.pix_clk_100hz / 10, stream->timing.h_total, stream->timing.v_total, - stream->timing.pixel_encoding, - stream->timing.display_color_depth); + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth)); DC_LOG_DC( "\tlink: %d\n", stream->link->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3992ad73165b..e143fab00a86 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.301" +#define DC_VER "3.2.309" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -225,6 +225,11 @@ struct dc_dmub_caps { bool subvp_psr; bool gecc_enable; uint8_t fams_ver; + bool aux_backlight_support; +}; + +struct dc_scl_caps { + bool sharpener_support; }; struct dc_caps { @@ -292,6 +297,7 @@ struct dc_caps { bool sequential_ono; /* Conservative limit for DCC cases which require ODM4:1 to support*/ uint32_t 
dcc_plane_width_limit; + struct dc_scl_caps scl_caps; }; struct dc_bug_wa { @@ -463,6 +469,7 @@ struct dc_config { unsigned int enable_fpo_flicker_detection; bool disable_hbr_audio_dp2; bool consolidated_dpia_dp_lt; + bool set_pipe_unlock_order; }; enum visual_confirm { @@ -862,7 +869,6 @@ struct dc_debug_options { bool sanity_checks; bool max_disp_clk; bool surface_trace; - bool timing_trace; bool clock_trace; bool validation_trace; bool bandwidth_calcs_trace; @@ -1061,6 +1067,7 @@ struct dc_debug_options { unsigned int sharpen_policy; unsigned int scale_to_sharpness_policy; bool skip_full_updated_if_possible; + unsigned int enable_oled_edp_power_up_opt; }; @@ -1253,7 +1260,6 @@ union surface_update_flags { uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; - uint32_t clip_size_change: 1; uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; @@ -1355,6 +1361,7 @@ struct dc_plane_state { enum mpcc_movable_cm_location mcm_location; struct dc_csc_transform cursor_csc_color_matrix; bool adaptive_sharpness_en; + int adaptive_sharpness_policy; int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; @@ -1461,6 +1468,7 @@ struct dc { struct dc_scratch_space current_state; struct dc_scratch_space new_state; struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */ } scratch; struct dml2_configuration_options dml2_options; @@ -1513,7 +1521,7 @@ struct dc_surface_update { * change cm2_params.component_settings: Full update * change cm2_params.cm2_luts: Fast update */ - struct dc_cm2_parameters *cm2_params; + const struct dc_cm2_parameters *cm2_params; const struct dc_csc_transform *cursor_csc_color_matrix; unsigned int sdr_white_level_nits; }; @@ -1770,7 +1778,6 @@ struct dc_link { bool dongle_mode_timing_override; bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; - bool disable_assr_for_uhbr; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1786,6 +1793,7 @@ struct dc_link { // BW ALLOCATON USB4 ONLY struct dc_dpia_bw_alloc dpia_bw_alloc_config; bool skip_implict_edp_power_control; + enum backlight_control_type backlight_control_type; }; /* Return an enumerated dc_link. @@ -2203,8 +2211,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link, * and 16 bit fractional, where 1.0 is max backlight value. */ bool dc_link_set_backlight_level(const struct dc_link *dc_link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); /* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). 
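dc_link_set_backlight_level() now takes a set_backlight_level_params struct (defined in dc_types.h later in this patch) instead of a bare PWM value and frame ramp. A caller-side migration sketch; brightness_pwm and frame_ramp_ms are assumed DM-side values, and only the fields a PWM-controlled panel needs are filled in:

	struct set_backlight_level_params params = {
		.backlight_pwm_u16_16 = brightness_pwm, /* 16.16 fixed point */
		.frame_ramp = frame_ramp_ms,
		.control_type = BACKLIGHT_CONTROL_PWM,
	};

	bool ok = dc_link_set_backlight_level(link, &params);

AUX-driven panels would instead set control_type to BACKLIGHT_CONTROL_VESA_AUX or BACKLIGHT_CONTROL_AMD_AUX and fill the millinit/luminance fields the struct provides.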
*/ bool dc_link_set_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 1e7de0f03290..f90fc154549a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -519,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi union dmub_rb_cmd cmd = { 0 }; unsigned int panel_inst = 0; - if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst)) + if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) && + dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) return; memset(&cmd, 0, sizeof(cmd)); @@ -1012,7 +1013,6 @@ static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) memset(&new_signals, 0, sizeof(new_signals)); + new_signals.bits.allow_idle = 1; /* always set */ + if (dc->config.disable_ips == DMUB_IPS_ENABLE || dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { new_signals.bits.allow_pg = 1; @@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) */ dc_dmub_srv->needs_idle_wake = false; - if (prev_driver_signals.bits.allow_ips2 && + if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( @@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) } dc_dmub_srv_notify_idle(dc, false); - if (prev_driver_signals.bits.allow_ips1) { + if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", ips_fw->signals.bits.ips1_commit, @@ -1862,3 +1864,81 @@ void dc_dmub_srv_fams2_passthrough_flip( dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); } } + +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) +{ + bool result; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, + start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + + return result; +} + +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +{ + uint32_t i; + enum dmub_gpint_command command_code; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + switch (output->ips_mode) { + case DMUB_IPS_MODE_IPS1_MAX: + command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS2: + command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_RCG: + command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; + break; + case DMUB_IPS_MODE_IPS1_ONO2_ON: + command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; + break; + default: + command_code = DMUB_GPINT__INVALID_COMMAND; + break; + } + + if (command_code == DMUB_GPINT__INVALID_COMMAND) + return; + + // send gpint commands and wait for ack + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, + (uint16_t)(output->ips_mode), + &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + 
output->residency_percent = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, + (uint16_t)(output->ips_mode), + &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->entry_counter = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_active_time_us[1] = 0; + + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[0] = 0; + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, + (uint16_t)(output->ips_mode), + &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->total_inactive_time_us[1] = 0; + + // NUM_IPS_HISTOGRAM_BUCKETS = 16 + for (i = 0; i < 16; i++) + if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + output->histogram[i] = 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 42f0cb672d8b..10b48198b7a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -209,4 +209,43 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_stream_state *stream, struct dc_surface_update *srf_updates, int surface_count); + +/** + * struct ips_residency_info - struct containing info from dmub_ips_residency_stats + * + * @ips_mode: The mode of IPS that the follow stats appertain to + * @residency_percent: The percentage of time spent in given IPS mode in millipercent + * @entry_counter: The number of entries made in to this IPS state + * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. + * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode + * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. 
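The split total_active_time_us[2]/total_inactive_time_us[2] fields follow from the query code above, which fetches the LO and HI halves with separate 32-bit GPINT replies. Recombining them is plain bit arithmetic; the helper name here is illustrative:

	/* Index 0 holds the lower 32 bits, index 1 the upper 32 bits,
	 * per the ips_residency_info kernel-doc surrounding this spot.
	 */
	static inline uint64_t ips_duration_us(const unsigned int half[2])
	{
		return ((uint64_t)half[1] << 32) | half[0];
	}

	uint64_t active_us = ips_duration_us(info.total_active_time_us);
	uint64_t inactive_us = ips_duration_us(info.total_inactive_time_us);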
+ * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c + */ +struct ips_residency_info { + enum dmub_ips_mode ips_mode; + unsigned int residency_percent; + unsigned int entry_counter; + unsigned int total_active_time_us[2]; + unsigned int total_inactive_time_us[2]; + unsigned int histogram[16]; +}; + +/** + * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status + * + * @dc_dmub_srv: The DC DMUB service pointer + * @start_measurement: Describes whether to start or stop measurement + * + * Return: true if GPINT was sent successfully, false otherwise + */ +bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); + +/** + * void dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * + * @dc_dmub_srv: The DC DMUB service pointer + * @output: Output struct to copy the residency info to + */ +void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 41bd95e9177a..8dd6eb044829 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1166,6 +1166,7 @@ struct dpcd_caps { int8_t branch_dev_name[6]; int8_t branch_hw_revision; int8_t branch_fw_revision[2]; + int8_t branch_vendor_specific_data[4]; bool allow_invalid_MSA_timing_param; bool panel_mode_edp; @@ -1191,6 +1192,7 @@ struct dpcd_caps { struct edp_psr_info psr_info; struct replay_info pr_info; + uint16_t edp_oled_emission_rate; }; union dpcd_sink_ext_caps { @@ -1204,7 +1206,7 @@ union dpcd_sink_ext_caps { uint8_t oled : 1; uint8_t reserved_2 : 1; uint8_t miniled : 1; - uint8_t reserved : 1; + uint8_t emission_output : 1; } bits; uint8_t raw; }; @@ -1358,6 +1360,9 @@ struct dp_trace { #ifndef DP_TUNNELING_IRQ #define DP_TUNNELING_IRQ (1 << 5) #endif +#ifndef DP_BRANCH_VENDOR_SPECIFIC_START +#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C +#endif /** USB4 DPCD BW Allocation Registers Chapter 10.7 **/ #ifndef DP_TUNNELING_CAPABILITIES #define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index 44afcd989224..bd37ec82b42d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -26,7 +26,6 @@ #ifndef _DC_PLANE_H_ #define _DC_PLANE_H_ -#include "dc.h" #include "dc_hw_types.h" struct dc_plane_state *dc_create_plane_state(const struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 603552dbd771..c8d8e335fa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -8,13 +8,13 @@ #include "dcn32/dcn32_dpp.h" #include "dcn401/dcn401_dpp.h" -static struct spl_funcs dcn2_spl_funcs = { +static struct spl_callbacks dcn2_spl_callbacks = { .spl_calc_lb_num_partitions = dscl2_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn32_spl_funcs = { +static struct spl_callbacks dcn32_spl_callbacks = { .spl_calc_lb_num_partitions = dscl32_spl_calc_lb_num_partitions, }; -static struct spl_funcs dcn401_spl_funcs = { +static struct spl_callbacks dcn401_spl_callbacks = { .spl_calc_lb_num_partitions = dscl401_spl_calc_lb_num_partitions, }; static void populate_splrect_from_rect(struct spl_rect *spl_rect, const 
struct rect *rect) @@ -38,6 +38,7 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality, spl_scaling_quality->h_taps = scaling_quality->h_taps; spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c; spl_scaling_quality->v_taps = scaling_quality->v_taps; + spl_scaling_quality->integer_scaling = scaling_quality->integer_scaling; } static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality, const struct spl_taps *spl_scaling_quality) @@ -76,16 +77,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl // This is used to determine the vtap support switch (plane_state->ctx->dce_version) { case DCN_VERSION_2_0: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; break; case DCN_VERSION_3_2: - spl_in->funcs = &dcn32_spl_funcs; + spl_in->callbacks = dcn32_spl_callbacks; break; case DCN_VERSION_4_01: - spl_in->funcs = &dcn401_spl_funcs; + spl_in->callbacks = dcn401_spl_callbacks; break; default: - spl_in->funcs = &dcn2_spl_funcs; + spl_in->callbacks = dcn2_spl_callbacks; } // Make format field from spl_in point to plane_res scl_data format spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format; @@ -187,14 +188,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active; spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active; - spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy; + spl_in->sharpen_policy = (enum sharpen_policy)plane_state->adaptive_sharpness_policy; spl_in->debug.scale_to_sharpness_policy = (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy; /* Check if the stream is in fullscreen and if it's HDR. 
* Use this to determine sharpness levels */ - spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream); + spl_in->is_fullscreen = pipe_ctx->stream->sharpening_required; spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream); spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index caa45db50232..db1e63a7d460 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -26,7 +26,6 @@ #ifndef _DC_STATE_H_ #define _DC_STATE_H_ -#include "dc.h" #include "inc/core_status.h" struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 14ea47eda0c8..413970588a26 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -143,6 +143,7 @@ union stream_update_flags { uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; uint32_t scaler_sharpener : 1; + uint32_t sharpening_required : 1; } bits; uint32_t raw; @@ -310,6 +311,7 @@ struct dc_stream_state { struct luminance_data lumin_data; bool scaler_sharpener_update; + bool sharpening_required; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -356,6 +358,7 @@ struct dc_stream_update { struct dc_cursor_position *cursor_position; bool *hw_cursor_req; bool *scaler_sharpener_update; + bool *sharpening_required; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 6d7989b751e2..edf4df1d03b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -179,6 +179,9 @@ struct dc_panel_patch { unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; + uint8_t blankstream_before_otg_off; + bool oled_optimize_display_on; + unsigned int force_mst_blocked_discovery; }; struct dc_edid_caps { @@ -922,6 +925,12 @@ struct display_endpoint_id { enum display_endpoint_type ep_type; }; +enum backlight_control_type { + BACKLIGHT_CONTROL_PWM = 0, + BACKLIGHT_CONTROL_VESA_AUX = 1, + BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) struct otg_phy_mux { uint8_t phy_output_num; @@ -1295,4 +1304,31 @@ struct dc_commit_streams_params { enum dc_power_source_type power_source; }; +struct set_backlight_level_params { + /* backlight in pwm */ + uint32_t backlight_pwm_u16_16; + /* brightness ramping */ + uint32_t frame_ramp; + /* backlight control type + * 0: PWM backlight control + * 1: VESA AUX backlight control + * 2: AMD AUX backlight control + */ + enum backlight_control_type control_type; + /* backlight in millinits */ + uint32_t backlight_millinits; + /* transition time in ms */ + uint32_t transition_time_in_ms; + /* minimum luminance in nits */ + uint32_t min_luminance; + /* maximum luminance in nits */ + uint32_t max_luminance; + /* minimum backlight in pwm */ + uint32_t min_backlight_pwm; + /* maximum backlight in pwm */ + uint32_t max_backlight_pwm; + /* AUX HW instance */ + uint8_t aux_inst; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b889004509a..d3e46c3cfa57 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ 
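The new sharpening_required flag replaces the dm_helpers_is_fullscreen() call above: the DM now decides when sharpening applies and pushes the result through the regular stream-update path (copy_stream_update_to_stream(), earlier in this patch, copies it onto the stream, and translate_SPL_in_params_from_pipe_ctx() feeds it to SPL as is_fullscreen). A DM-side sketch; the fullscreen check itself is a hypothetical placeholder:

	bool sharpening = dm_compute_fullscreen_state(state); /* hypothetical */
	struct dc_stream_update stream_update = {0};

	stream_update.sharpening_required = &sharpening;
	/* handed to the usual update entry point; the value lands in
	 * stream->sharpening_required and ultimately spl_in->is_fullscreen
	 */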
b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk( int otg_inst, int dp_hpo_inst) { - /* set the dtbclk_p source */ - dccg401_set_dtbclk_p_src(dccg, src, otg_inst); - /* enabled to select one of the DTBCLKs for pipe */ if (src == REFCLK) dccg401_disable_dpstreamclk(dccg, dp_hpo_inst); @@ -805,33 +802,6 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); - break; - } - switch (stream_enc_inst) { case 0: REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, @@ -864,37 +834,8 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst } } -/*get other front end connected to this backend*/ -static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst) -{ - uint8_t num_enabled_symclk_fe = 0; - uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0}; - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - uint8_t i; - - REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0], - SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]); - - REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1], - SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]); - - REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2], - SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]); - - REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3], - SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]); - - for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) { - if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst) - num_enabled_symclk_fe++; - } - - return num_enabled_symclk_fe; -} - static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) { - uint8_t num_enabled_symclk_fe = 0; struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); switch (stream_enc_inst) { @@ -919,31 +860,6 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_ins SYMCLKD_FE_SRC_SEL, 0); break; } - - /*check other enabled symclk fe connected to this be */ - num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst); - /*only turn off backend clk if other front ends attached to this backend are all off, - for mst, only turn off the backend if this is the last front end*/ - if (num_enabled_symclk_fe == 0) { - switch (link_enc_inst) { - case 0: - REG_UPDATE(SYMCLKA_CLOCK_ENABLE, - SYMCLKA_CLOCK_ENABLE, 0); - break; - case 1: - REG_UPDATE(SYMCLKB_CLOCK_ENABLE, - SYMCLKB_CLOCK_ENABLE, 0); - break; - case 2: - REG_UPDATE(SYMCLKC_CLOCK_ENABLE, - SYMCLKC_CLOCK_ENABLE, 0); - break; - case 3: - REG_UPDATE(SYMCLKD_CLOCK_ENABLE, - SYMCLKD_CLOCK_ENABLE, 
0); - break; - } - } } static const struct dccg_funcs dccg401_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 5c2825bc9a87..d199e4ed2e59 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -277,7 +277,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dynamic_range_rgb = 0; /*full range*/ uint8_t dynamic_range_ycbcr = 1; /*bt709*/ @@ -380,7 +379,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; if (REG(DP_MSA_TIMING_PARAM1)) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c index db7557a1c613..8a3fbf95c48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c @@ -76,7 +76,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT; @@ -112,7 +111,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MAS mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L, value); - temp = 0; value = 0; temp = address.low_part >> UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT; diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index 8db9f7514466..889f314cac65 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -717,7 +717,7 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index eaed5d1c398a..dcd2cdfe91eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -365,23 +365,18 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^1 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 05df502a54f2..88cf47a5ea75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ 
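The reworked segment table in cm_helper_translate_curve_to_hw_format() above (and its dcn30 twin below) keeps the "less than 256 points" budget the comment promises; counting hardware points as 2^seg_distr[i] per region:

	/* old layout: seg_distr = {3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1}
	 *   2^3 + 9 * 2^4 + 2^1 = 8 + 144 + 2 = 154 points
	 *
	 * new layout: twelve regions of 2^4 plus one of 2^1
	 *   12 * 2^4 + 2^1 = 192 + 2 = 194 points
	 *
	 * Both fit within 256 points; the new table trades the coarse 2^3
	 * first region for two extra fine regions, extending coverage from
	 * 2^-10 down to 2^-12.
	 */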
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -46,7 +46,7 @@ #include "clk_mgr.h" __printf(3, 4) -unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...) +unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...) { int ret_vsnprintf; unsigned int chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index f31f0e3abfc0..0690c346f2c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -140,23 +140,18 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 11 segments - * segment is from 2^-10 to 2^0 + /* 13 segments + * segment is from 2^-12 to 2^0 * There are less than 256 points, for optimization */ - seg_distr[0] = 3; - seg_distr[1] = 4; - seg_distr[2] = 4; - seg_distr[3] = 4; - seg_distr[4] = 4; - seg_distr[5] = 4; - seg_distr[6] = 4; - seg_distr[7] = 4; - seg_distr[8] = 4; - seg_distr[9] = 4; - seg_distr[10] = 1; - - region_start = -10; + const uint8_t SEG_COUNT = 12; + + for (i = 0; i < SEG_COUNT; i++) + seg_distr[i] = 4; + + seg_distr[SEG_COUNT] = 1; + + region_start = -SEG_COUNT; region_end = 1; } @@ -285,157 +280,6 @@ bool cm3_helper_translate_curve_to_hw_format( return true; } -#define NUM_DEGAMMA_REGIONS 12 - - -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params) -{ - struct curve_points3 *corner_points; - struct pwl_result_data *rgb_resulted; - struct pwl_result_data *rgb; - struct pwl_result_data *rgb_plus_1; - - int32_t region_start, region_end; - int32_t i; - uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points; - - if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) - return false; - - corner_points = lut_params->corner_points; - rgb_resulted = lut_params->rgb_resulted; - hw_points = 0; - - memset(lut_params, 0, sizeof(struct pwl_params)); - memset(seg_distr, 0, sizeof(seg_distr)); - - region_start = -NUM_DEGAMMA_REGIONS; - region_end = 0; - - - for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) - seg_distr[i] = -1; - /* 12 segments - * segments are from 2^-12 to 0 - */ - for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++) - seg_distr[i] = 4; - - for (k = 0; k < MAX_REGIONS_NUMBER; k++) { - if (seg_distr[k] != -1) - hw_points += (1 << seg_distr[k]); - } - - j = 0; - for (k = 0; k < (region_end - region_start); k++) { - increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]); - start_index = (region_start + k + MAX_LOW_POINT) * - NUMBER_SW_SEGMENTS; - for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS; - i += increment) { - if (j == hw_points - 1) - break; - if (i >= TRANSFER_FUNC_POINTS) - return false; - rgb_resulted[j].red = output_tf->tf_pts.red[i]; - rgb_resulted[j].green = output_tf->tf_pts.green[i]; - rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; - j++; - } - } - - /* last point */ - start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS; - rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index]; - rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; - rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - - corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_start)); - corner_points[0].green.x = 
corner_points[0].red.x; - corner_points[0].blue.x = corner_points[0].red.x; - corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_end)); - corner_points[1].green.x = corner_points[1].red.x; - corner_points[1].blue.x = corner_points[1].red.x; - - corner_points[0].red.y = rgb_resulted[0].red; - corner_points[0].green.y = rgb_resulted[0].green; - corner_points[0].blue.y = rgb_resulted[0].blue; - - /* see comment above, m_arrPoints[1].y should be the Y value for the - * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) - */ - corner_points[1].red.y = rgb_resulted[hw_points - 1].red; - corner_points[1].green.y = rgb_resulted[hw_points - 1].green; - corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; - corner_points[1].red.slope = dc_fixpt_zero; - corner_points[1].green.slope = dc_fixpt_zero; - corner_points[1].blue.slope = dc_fixpt_zero; - - if (output_tf->tf == TRANSFER_FUNCTION_PQ) { - /* for PQ, we want to have a straight line from last HW X point, - * and the slope to be such that we hit 1.0 at 10000 nits. - */ - const struct fixed31_32 end_value = - dc_fixpt_from_int(125); - - corner_points[1].red.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), - dc_fixpt_sub(end_value, corner_points[1].red.x)); - corner_points[1].green.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), - dc_fixpt_sub(end_value, corner_points[1].green.x)); - corner_points[1].blue.slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), - dc_fixpt_sub(end_value, corner_points[1].blue.x)); - } - - lut_params->hw_points_num = hw_points; - - k = 0; - for (i = 1; i < MAX_REGIONS_NUMBER; i++) { - if (seg_distr[k] != -1) { - lut_params->arr_curve_points[k].segments_num = - seg_distr[k]; - lut_params->arr_curve_points[i].offset = - lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]); - } - k++; - } - - if (seg_distr[k] != -1) - lut_params->arr_curve_points[k].segments_num = seg_distr[k]; - - rgb = rgb_resulted; - rgb_plus_1 = rgb_resulted + 1; - - i = 1; - while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); - - ++rgb_plus_1; - ++rgb; - ++i; - } - cm3_helper_convert_to_custom_float(rgb_resulted, - lut_params->corner_points, - hw_points, false); - - return true; -} - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index f496e952ceec..d01a8b8f9595 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -255,7 +255,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -362,7 +361,6 @@ void enc1_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = 
colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c index 5b343f745cf3..ae81451a3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc) REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); } +static bool enc314_is_fifo_enabled(struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val; + + REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val); + return (reset_val != 0); +} + void enc314_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) @@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = { .enable_fifo = enc314_enable_fifo, .disable_fifo = enc314_disable_fifo, + .is_fifo_enabled = enc314_is_fifo_enabled, .set_input_mode = enc314_set_dig_input_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index 0a27e0942a12..098c2a01a850 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -447,7 +447,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( uint32_t misc1 = 0; uint32_t h_blank; uint32_t h_back_porch; - uint8_t synchronous_clock = 0; /* asynchronous mode */ uint8_t colorimetry_bpc; uint8_t dp_pixel_encoding = 0; uint8_t dp_component_depth = 0; @@ -603,7 +602,6 @@ void enc401_stream_encoder_dp_set_stream_attribute( break; } - misc0 = misc0 | synchronous_clock; misc0 = colorimetry_bpc << 5; switch (output_color_space) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 9405c47ee2a9..f81e5a4e1d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -143,7 +143,7 @@ void generic_reg_wait(const struct dc_context *ctx, unsigned int delay_between_poll_us, unsigned int time_out_num_tries, const char *func_name, int line); -unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...); +unsigned int snprintf_count(char *pBuf, unsigned int bufSize, const char *fmt, ...); /* These macros need to be used with soc15 registers in order to retrieve * the actual offset. 
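The synchronous_clock deletions in the three stream-encoder files above (dce110, dcn10, dcn401) are behavior-neutral twice over, which the retained context makes visible:

	uint8_t synchronous_clock = 0;	/* the variable was always zero */

	misc0 = misc0 | synchronous_clock;	/* removed: ORs in 0, and... */
	misc0 = colorimetry_bpc << 5;	/* ...this plain assignment then
					 * overwrote misc0 anyway, so the
					 * OR result was never observable */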
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 565f3c492477..0c8c4a080c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -785,12 +785,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 9d6675ecc5f1..c935903b68e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -845,12 +845,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 4fce64a030b6..390c1a77fda6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 3fa9a5da02f6..843d6004258c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
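The CalculatePrefetchSchedule() error-path hunks in this and the following DML files all draw the same line: values returned through pointers still have to be zeroed for the caller, while the zeroing of locals such as TimeForFetchingMetaPTE and LinesToRequestPrefetchPixelData is dropped because nothing reads those locals again on the error path. Reduced to a sketch:

	if (MyError) {
		/* outputs: callers read these, so the clearing stays */
		*PrefetchBandwidth = 0;
		*DestinationLinesForPrefetch = 0;
		/* locals: dead stores on this path, now removed
		 *   TimeForFetchingMetaPTE = 0;
		 *   LinesToRequestPrefetchPixelData = 0;
		 */
	}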
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index eb3ed965e48b..cd8cca651419 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -1049,12 +1049,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 9e1c18b90805..5718000627b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -435,8 +435,6 @@ static void get_meta_and_pte_attr( blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -485,8 +483,6 @@ static void get_meta_and_pte_attr( - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 1c10ba4dcdde..cee1b351e105 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -1280,12 +1280,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; @@ -1775,15 +1772,6 @@ static unsigned int CalculateVMAndRowBytes( *PixelPTEReqWidth = 32768.0 / BytePerPixel; *PTERequestSize = 64; FractionOfPTEReturnDrop = 0; - } else if (MacroTileSizeBytes == 4096) { - PixelPTEReqHeightPTEs = 1; - *PixelPTEReqHeight = MacroTileHeight; - *PixelPTEReqWidth = 8 * *MacroTileWidth; - *PTERequestSize = 64; - if (ScanDirection != dm_vert) - FractionOfPTEReturnDrop = 0; - else - FractionOfPTEReturnDrop = 7.0 / 8; } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) { PixelPTEReqHeightPTEs = 16; *PixelPTEReqHeight = 16 * BlockHeight256Bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index b28fcc8608ff..76d3bb3c9155 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -392,8 +392,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, blk_bytes = surf_linear ? 
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double)blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -464,8 +462,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. // calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 2b275e680379..f567a9023682 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1444,12 +1444,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index b57b095cd4a8..c46bda2141ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -413,8 +413,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -481,8 +479,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
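Every CalculatePrefetchSchedule variant in this series (dcn20/20v2/21/30/31 above, dcn314 and dml32 below) converges on the same failure convention: when MyError is set, only the outputs that escape through pointers are zeroed so the caller sees a rejected schedule, while resets of plain locals were dropped as dead stores. A hedged sketch of that convention, with hypothetical names rather than the real DML signature:

/* Sketch only: on failure, zero what the caller can observe; locals
 * simply die with the call and need no reset. */
static bool calculate_prefetch_sketch(double *prefetch_bw,
				      double *dst_lines_for_prefetch)
{
	bool my_error = false;
	double lines_scratch = 0.0;	/* local intermediate */

	/* ... bandwidth and line math that may set my_error ... */

	if (my_error) {
		*prefetch_bw = 0;	/* visible to the caller */
		*dst_lines_for_prefetch = 0;
		/* no 'lines_scratch = 0' here: it would be a dead store */
		return false;
	}

	*prefetch_bw = 1.0;		/* stand-in for real results */
	*dst_lines_for_prefetch = lines_scratch;
	return true;
}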
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index debfa31583a6..5865e8fa2d8e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -1461,12 +1461,9 @@ static bool CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c index 61b3bebf24c9..b7d2a0caec11 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c @@ -501,8 +501,6 @@ static void get_meta_and_pte_attr( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); - log2_blk_height = 0; - log2_blk_width = 0; // remember log rule // "+" in log is multiply @@ -569,8 +567,6 @@ static void get_meta_and_pte_attr( log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height; meta_req_width = 1 << log2_meta_req_width; meta_req_height = 1 << log2_meta_req_height; - log2_meta_row_height = 0; - meta_row_width_ub = 0; // the dimensions of a meta row are meta_row_width x meta_row_height in elements. 
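The meta-request sizing these rq_dlg hunks leave in place encodes the DCC metadata ratio: one metadata byte describes 256 data bytes, which is where the "+ 8" (log2(256)) in log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height comes from. A worked instance with illustrative values, not taken from any specific DCN configuration:

#include <assert.h>

int main(void)
{
	unsigned int log2_meta_req_bytes = 6;    /* 64-byte metadata request */
	unsigned int log2_bytes_per_element = 2; /* 32bpp -> 4 B per element */
	unsigned int log2_meta_req_height = 3;   /* request covers 8 rows    */

	/* 2^6 meta bytes describe 2^(6+8) data bytes; dividing out the
	 * element size and the row count leaves the width in elements. */
	unsigned int log2_meta_req_width = log2_meta_req_bytes + 8
			- log2_bytes_per_element - log2_meta_req_height;

	assert((1u << log2_meta_req_width) == 512);	/* 512 elements wide */
	return 0;
}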
// calculate upper bound of the meta_row_width diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index d92fb428ee96..86ac7d59fd32 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -4097,12 +4097,9 @@ bool dml32_CalculatePrefetchSchedule( if (MyError) { *PrefetchBandwidth = 0; - TimeForFetchingMetaPTE = 0; - TimeForFetchingRowInVBlank = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; - LinesToRequestPrefetchPixelData = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index a201dbb743d7..d9e63c4fdd95 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -204,8 +204,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .num_states = 8, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, - .sr_exit_z8_time_us = 250.0, - .sr_enter_plus_exit_z8_time_us = 350.0, + .sr_exit_z8_time_us = 263.0, + .sr_enter_plus_exit_z8_time_us = 363.0, .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index d8bfc85e5dcd..88dc2b97e7bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -559,12 +559,11 @@ static void get_surf_rq_param( const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param, bool is_chroma) { - bool mode_422 = 0; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; unsigned int meta_pitch = 0; - unsigned int ppe = mode_422 ? 
2 : 1; + unsigned int ppe = 1; bool surf_linear; bool surf_vert; unsigned int bytes_per_element; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 8697eac1e1f7..138b4b1e42ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -859,7 +859,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->immediate_flip = plane_state->flip_immediate; plane->composition.rect_out_height_spans_vactive = - plane_state->dst_rect.height >= stream->timing.v_addressable && + plane_state->dst_rect.height >= stream->src.height && stream->dst.height >= stream->timing.v_addressable; } @@ -1036,6 +1036,7 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported; context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0; context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz; + context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz; } void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index d35dd507cb9f..bbc28b9a15a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -13,11 +13,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { - *dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); if (!(*dml_ctx)) return false; - (*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); + (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); if (!((*dml_ctx)->v21.dml_init.dml2_instance)) return false; @@ -27,7 +27,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) (*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config; (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; - (*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); + (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); if (!((*dml_ctx)->v21.mode_programming.programming)) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 83fc15bf13cf..25b607e7b726 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -88,6 +88,7 @@ struct dml2_display_arb_regs { uint32_t sdpif_request_rate_limit; uint32_t allow_sdpif_rate_limit_when_cstate_req; uint32_t dcfclk_deep_sleep_hysteresis; + uint32_t 
pstate_stall_threshold; }; struct dml2_cursor_dlg_regs{ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index 0aa4e4d343b0..3d41ffde91c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -159,6 +159,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 3ea54fd52e46..92e43a1e4dd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -12236,6 +12236,8 @@ static void rq_dlg_get_dlg_reg( static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param) { + double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz; + arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs; arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this set to max arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4; @@ -12247,6 +12249,7 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co arb_param->compbuf_size = mode_lib->mp.CompressedBufferSizeInkByte / mode_lib->ip.compressed_buffer_segment_size_in_kbytes; arb_param->allow_sdpif_rate_limit_when_cstate_req = dml_get_hw_debug5(mode_lib); arb_param->dcfclk_deep_sleep_hysteresis = dml_get_dcfclk_deep_sleep_hysteresis(mode_lib); + arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz); #ifdef __DML_VBA_DEBUG__ dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index ab229e1598ae..714b5c39b7e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -425,6 +425,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters phantom->timing.v_total = meta->v_total; phantom->timing.v_active = meta->v_active; phantom->timing.v_front_porch = meta->v_front_porch; + phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active; phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active; phantom->timing.drr_config.enabled = false; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index dd9971867f74..92269f0e50ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -1799,6 +1799,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp } if (s->pmo_dcn4.num_pstate_candidates > 0) { + s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true; s->pmo_dcn4.cur_pstate_candidate = -1; return true; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 866b0abcff1b..9190c1328d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -209,8 +209,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1; if (odms_needed <= unused_dpps) { - unused_dpps -= odms_needed; - if (odms_needed == 1) { p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1; optimization_done = true; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h index cd1706d301e7..f09cba8e29cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -690,6 +690,7 @@ struct dcn20_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index b110f35ef66b..f236824126e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -572,6 +572,7 @@ struct dcn3_dpp { int lb_memory_size; int lb_bits_per_entry; bool is_write_to_ram_a_safe; + bool dispclk_r_gate_disable; struct scaler_data scl_data; struct pwl_params pwl_data; }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index 8473c694bfdc..62b7012cda43 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -50,13 +50,21 @@ void dpp35_dppclk_control( DPPCLK_RATE_CONTROL, dppclk_div, DPP_CLOCK_ENABLE, 1); else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 1, DISPCLK_R_GATE_DISABLE, 1); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1); } else - REG_UPDATE_2(DPP_CONTROL, + if (dpp->dispclk_r_gate_disable) + REG_UPDATE_2(DPP_CONTROL, DPP_CLOCK_ENABLE, 0, DISPCLK_R_GATE_DISABLE, 0); + else + REG_UPDATE(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0); } void dpp35_program_bias_and_scale_fcnv( @@ -128,6 +136,10 @@ bool dpp35_construct( (const struct dcn3_dpp_mask *)(tf_mask)); dpp->base.funcs = &dcn35_dpp_funcs; + + // w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs + if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10) + dpp->dispclk_r_gate_disable = true; return ret; } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c 
b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c index 5105fd580017..2f92e7d4981b 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c @@ -1091,7 +1091,8 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base, /* ISHARP_DELTA_LUT */ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta); dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level; - dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta; + memcpy(dpp->scl_data.dscl_prog_data.isharp_delta, scl_data->dscl_prog_data.isharp_delta, + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) return; diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h index bd98b327a6c7..b86347c9b038 100644 --- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h @@ -63,10 +63,6 @@ bool cm3_helper_translate_curve_to_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint); -bool cm3_helper_translate_curve_to_degamma_hw_format( - const struct dc_transfer_func *output_tf, - struct pwl_params *lut_params); - bool cm3_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, struct curve_points3 *corner_points, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index f344478e9bd4..b099989d9364 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -443,7 +443,6 @@ struct gpio *dal_gpio_create_irq( case GPIO_ID_GPIO_PAD: break; default: - id = GPIO_ID_HPD; ASSERT_CRITICAL(false); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h index a1e2cde9c4cc..4bd1dda07719 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h @@ -198,6 +198,8 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL1; + uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL2; }; #define HUBBUB_REG_FIELD_LIST_DCN32(type) \ @@ -313,7 +315,12 @@ struct dcn_hubbub_registers { type DCN_VM_ERROR_VMID;\ type DCN_VM_ERROR_TABLE_LEVEL;\ type DCN_VM_ERROR_PIPE;\ - type DCN_VM_ERROR_INTERRUPT_STATUS + type DCN_VM_ERROR_INTERRUPT_STATUS;\ + type DCHUBBUB_TIMEOUT_ERROR_STATUS;\ + type DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD;\ + type DCHUBBUB_TIMEOUT_DETECTION_EN;\ + type DCHUBBUB_TIMEOUT_TIMER_RESET #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index 37d26fa0b6fb..5d658e9bef64 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1192,6 +1192,17 @@ static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) } } +static void dcn401_program_timeout_thresholds(struct hubbub *hubbub, struct 
dml2_display_arb_regs *arb_regs) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* request backpressure and outstanding return threshold (unused)*/ + //REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold); + + /* P-State stall threshold */ + REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold); +} + static const struct hubbub_funcs hubbub4_01_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, @@ -1215,6 +1226,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_det_segments = dcn401_program_det_segments, .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, + .program_timeout_thresholds = dcn401_program_timeout_thresholds, }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h index f35f19ba3e18..5f1960722ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h @@ -123,8 +123,12 @@ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ - HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) - + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_ERROR_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_DETECTION_EN, mask_sh),\ + HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_TIMER_RESET, mask_sh) bool hubbub401_program_urgent_watermarks( struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 4fbed0298adf..81f4c386c287 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1039,7 +1039,8 @@ void dce110_edp_backlight_control( link_transmitter_control(ctx->dc_bios, &cntl); if (enable && link->dpcd_sink_ext_caps.bits.oled && - !link->dc->config.edp_no_power_sequencing) { + !link->dc->config.edp_no_power_sequencing && + !link->local_sink->edid_caps.panel_patch.oled_optimize_display_on) { post_T7_delay += link->panel_config.pps.extra_post_t7_ms; msleep(post_T7_delay); } @@ -3142,9 +3143,10 @@ static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) } bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; struct dc_link *link = pipe_ctx->stream->link; struct dc *dc = link->ctx->dc; struct abm *abm = pipe_ctx->stream_res.abm; @@ -3315,7 +3317,7 @@ void dce110_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and 
disable_link_dp once. */ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index ed3cc3648e8e..06789ac3a224 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -88,8 +88,7 @@ void dce110_edp_wait_for_hpd_ready( bool power_up); bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); void dce110_set_pipe(struct pipe_ctx *pipe_ctx); void dce110_disable_link_output(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index a6a1db5ba8ba..681bb92c6069 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -3453,7 +3453,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index a80c08582932..05424a9af58b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1458,8 +1458,12 @@ void dcn20_pipe_control_lock( } else { if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); - else - pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + else { + if (dc->hwseq->funcs.perform_3dlut_wa_unlock) + dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe); + else + pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + } } } @@ -1732,7 +1736,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.scaler || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change || - plane_state->update_flags.bits.clip_size_change || plane_state->update_flags.bits.per_pixel_alpha_change || pipe_ctx->stream->update_flags.bits.scaling) { pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1745,7 +1748,6 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || - (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { hubp->funcs->mem_program_viewport( @@ -2056,22 +2058,15 @@ void dcn20_program_front_end_for_ctx( */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { + 
dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { - if (dc->hwss.blank_phantom) { - int main_pipe_width = 0, main_pipe_height = 0; - struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - - if (phantom_stream) { - main_pipe_width = phantom_stream->dst.width; - main_pipe_height = phantom_stream->dst.height; - } - - dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); + if (dc->hwseq->funcs.blank_pixel_data) { + dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); } tg->funcs->enable_crtc(tg); } @@ -2255,9 +2250,9 @@ void dcn20_post_unlock_program_front_end( struct timing_generator *tg = pipe->stream_res.tg; - if (tg->funcs->get_double_buffer_pending) { + if (tg->funcs->get_optc_double_buffer_pending) { for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us - && tg->funcs->get_double_buffer_pending(tg); j++) + && tg->funcs->get_optc_double_buffer_pending(tg); j++) udelay(polling_interval_us); } } @@ -2771,7 +2766,6 @@ void dcn20_reset_back_end_for_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { - int i; struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); @@ -2838,19 +2832,16 @@ void dcn20_reset_back_end_for_pipe( } } - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) - break; - - if (i == dc->res_pool->pipe_count) - return; - /* * In case of a dangling plane, setting this to NULL unconditionally * causes failures during reset hw ctx where, if stream is NULL, * it is expected that the pipe_ctx pointers to pipes and plane are NULL. 
*/ pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 1ea95f8d4cbc..61efb15572ff 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -137,7 +137,7 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dpms_off = true; } -static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst) { union dmub_rb_cmd cmd; @@ -199,7 +199,7 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst, @@ -234,7 +234,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, otg_inst, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); @@ -242,14 +242,15 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) } bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst; + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; if (!abm || !tg || !panel_cntl) return false; @@ -257,7 +258,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { - dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); + dce110_set_backlight_level(pipe_ctx, backlight_level_params); return true; } @@ -268,7 +269,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, + dcn21_dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h index 9cee9bdb8de9..f72a27ac1bf1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h @@ -47,11 +47,12 @@ void dcn21_optimize_pwr_state( void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx); +bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, + uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst); void dcn21_set_pipe(struct pipe_ctx *pipe_ctx); void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx); bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); bool dcn21_is_abm_supported(struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index bded33575493..e89ebfda4873 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -245,6 +245,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; int acquired_rmu = 0; @@ -283,8 +284,14 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); + result = mpc->funcs->program_shaper(mpc, shaper_lut, stream->lut3d_func->state.bits.rmu_mux_num); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); + } else { // loop through the available mux and release the requested mpcc_id mpc->funcs->release_rmu(mpc, mpcc_id); @@ -486,7 +493,6 @@ bool dcn30_mmhubbub_warmup( } /*following is the original: warmup each DWB's mcif buffer*/ for (i = 0; i < num_dwb; i++) { - dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst]; /*warmup is for VM mode only*/ if (wb_info[i].mcif_buf_params.p_vmid == 0) @@ -1185,3 +1191,30 @@ void dcn30_prepare_bandwidth(struct dc *dc, if (!dc->clk_mgr->clks.fw_based_mclk_switching) dc_dmub_srv_p_state_delegate(dc, false, context); } + +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + bool pending_updates = false; + unsigned int i; + + if (tg && tg->funcs->is_tg_enabled(tg)) { + // Poll for 100ms maximum + for (i = 0; i < 100000; i++) { + pending_updates = false; + if (tg->funcs->get_optc_double_buffer_pending) + pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg); + + if (tg->funcs->get_otg_double_buffer_pending) + pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg); + + if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state) + pending_updates |= tg->funcs->get_pipe_update_pending(tg); + + if (!pending_updates) + break; + + udelay(1); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 6a153e7ce910..4b90b781c4f2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -96,4 +96,6 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 2a8dc40d2847..0e8d32e3dbae 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -108,7 +108,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = 
dcn10_update_visual_confirm_color, - .is_abm_supported = dcn21_is_abm_supported + .is_abm_supported = dcn21_is_abm_supported, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 93e49d87a67c..780ce4c064aa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c @@ -107,6 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h index 0bca48ccbfa2..a6e0115a53ee 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h @@ -23,8 +23,8 @@ * */ -#ifndef __DC_DCN30_INIT_H__ -#define __DC_DCN30_INIT_H__ +#ifndef __DC_DCN301_INIT_H__ +#define __DC_DCN301_INIT_H__ struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3d4b31bd9946..03ba01f4ace1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -47,9 +47,11 @@ #include "dce/dmub_outbox.h" #include "link.h" #include "dcn10/dcn10_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" #include "dce/dce_i2c_hw.h" +#include "dce/dmub_abm_lcd.h" #define DC_LOGGER_INIT(logger) @@ -517,10 +519,18 @@ static void dcn31_reset_back_end_for_pipe( dc->hwss.set_abm_immediate_disable(pipe_ctx); + link = pipe_ctx->stream->link; + + if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) && + (link->connector_signal == SIGNAL_TYPE_EDP)) + dc->hwss.blank_stream(pipe_ctx); + pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) pipe_ctx->stream_res.tg->funcs->set_odm_bypass( @@ -532,7 +542,6 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - link = pipe_ctx->stream->link; /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature. 
When system resume from S4 with second @@ -633,3 +642,51 @@ void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } + +static void dmub_abm_set_backlight(struct dc_context *dc, + struct set_backlight_level_params *backlight_level_params, uint32_t panel_inst) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_set_backlight.header.type = DMUB_CMD__ABM; + cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; + cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = backlight_level_params->frame_ramp; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_level_params->backlight_pwm_u16_16; + cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type = + (enum dmub_backlight_control_type) backlight_level_params->control_type; + cmd.abm_set_backlight.abm_set_backlight_data.min_luminance = backlight_level_params->min_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.max_luminance = backlight_level_params->max_luminance; + cmd.abm_set_backlight.abm_set_backlight_data.min_backlight_pwm = backlight_level_params->min_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.max_backlight_pwm = backlight_level_params->max_backlight_pwm; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); + cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *backlight_level_params) +{ + struct dc_context *dc = pipe_ctx->stream->ctx; + struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; + + dcn21_dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index b8bc939da155..0d09aa8cfb65 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -51,6 +51,8 @@ int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_ void dcn31_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); +bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, + struct set_backlight_level_params *params); bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); @@ -59,5 +61,4 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); - #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 56f3c70d4b55..5f8f45b48720 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 4e93eeedfc1b..9b88eb72086d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider( } } +static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe) +{ + return pipe && pipe->stream + // Check dig's otg instance. + && pipe->stream_res.stream_enc + && pipe->stream_res.stream_enc->funcs->dig_source_otg + && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc) + && pipe->stream->link && pipe->stream->link->link_enc + && pipe->stream->link->link_enc->funcs->is_dig_enabled + && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc) + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled + && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc); +} + void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx) { unsigned int i; @@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) && + !pipe->stream->apply_seamless_boot_optimization && + !pipe->stream->apply_edp_fast_boot_optimization) { + if (dcn314_is_pipe_dig_fifo_on(pipe)) + continue; pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -478,7 +496,7 @@ void dcn314_disable_link_output(struct dc_link *link, * from enable/disable link output and only call edp panel control * in enable_link_dp and disable_link_dp once. 
*/ - if (dmcu != NULL && dmcu->funcs->lock_phy) + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index 68e6de6b5758..6bdfbf22ce87 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 2e8c9f738259..d7f8b2dcaa6b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -439,6 +439,7 @@ bool dcn32_set_mpc_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = false; @@ -458,13 +459,13 @@ bool dcn32_set_mpc_shaper_3dlut( if (stream->lut3d_func && stream->lut3d_func->state.bits.initialized == 1) { - result = mpc->funcs->program_3dlut(mpc, - &stream->lut3d_func->lut_3d, - mpcc_id); + result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_3dlut failed\n", __func__); - result = mpc->funcs->program_shaper(mpc, - shaper_lut, - mpcc_id); + result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id); + if (!result) + DC_LOG_ERROR("%s: program_shaper failed\n", __func__); } return result; @@ -1398,10 +1399,10 @@ void dcn32_disable_link_output(struct dc_link *link, link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control && + link->dc->hwss.edp_power_control && !link->skip_implict_edp_power_control) link->dc->hwss.edp_power_control(link, false); - else if (dmcu != NULL && dmcu->funcs->lock_phy) + else if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); @@ -1698,52 +1699,6 @@ void dcn32_init_blank( hws->funcs.wait_for_blank_complete(opp); } -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height) -{ - struct dce_hwseq *hws = dc->hwseq; - enum dc_color_space color_space; - struct tg_color black_color = {0}; - struct output_pixel_processor *opp = NULL; - uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; - uint32_t i; - - /* program opp dpg blank color */ - color_space = COLOR_SPACE_SRGB; - color_space_to_black_color(dc, color_space, &black_color); - - otg_active_width = width; - otg_active_height = height; - - /* get the OPTC source */ - tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); - ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); - - for (i = 0; i < 
dc->res_pool->res_cap->num_opp; i++) { - if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) { - opp = dc->res_pool->opps[i]; - break; - } - } - - if (opp && opp->funcs->opp_set_disp_pattern_generator) - opp->funcs->opp_set_disp_pattern_generator( - opp, - CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, - CONTROLLER_DP_COLOR_SPACE_UDEFINED, - COLOR_DEPTH_UNDEFINED, - &black_color, - otg_active_width, - otg_active_height, - 0); - - if (tg->funcs->is_tg_enabled(tg)) - hws->funcs.wait_for_blank_complete(opp); -} - /* phantom stream id's can change often, but can be identical between contexts. * This function checks for the condition the streams are identical to avoid * redundant pipe transitions. diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cac4a08b92a4..0303a5953673 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -119,11 +119,6 @@ void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); -void dcn32_blank_phantom(struct dc *dc, - struct timing_generator *tg, - int width, - int height); - bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 3422b564ae98..5ecee7e320da 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -98,7 +98,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn30_hardware_release, .set_pipe = dcn21_set_pipe, @@ -117,10 +117,10 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, .program_outstanding_updates = dcn32_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn32_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index bd309dbdf7b2..e599cdc465bf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -309,6 +309,7 @@ void dcn35_init_hw(struct dc *dc) dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; + dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support; } if (dc->res_pool->pg_cntl) { @@ -841,6 +842,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) uint32_t num_opps = 0; uint32_t 
opp_id_src0 = OPP_ID_INVALID; uint32_t opp_id_src1 = OPP_ID_INVALID; + uint32_t optc_dsc_state = 0; // Step 1: To find out which OPTC is running & OPTC DSC is ON // We can't use res_pool->res_cap->num_timing_generator to check // Some ASICs would be fused display pipes less than the default setting. // In dcnxx_resource_construct function, driver would obtain real information. for (i = 0; i < dc->res_pool->timing_generator_count; i++) { - uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i]; if (tg->funcs->is_tg_enabled(tg)) { @@ -864,15 +865,18 @@ } } - // Step 2: To power down DSC but skip DSC of running OPTC + // Step 2: To power down DSC but skip DSC of running OPTC for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct dcn_dsc_state s = {0}; - dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + /* avoid reading DSC state when it is not in use as it may be power gated */ + if (optc_dsc_state) { + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); - if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && - s.dsc_clock_en && s.dsc_fw_en) - continue; + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + } pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 2bbf1fef94fd..fd67779c27a9 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -101,7 +101,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -123,7 +123,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, }; static const struct hwseq_private_funcs dcn35_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index d00822e8daa5..3c275a1eff58 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -100,7 +100,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, @@ -122,7 +122,6 @@ static const struct hw_sequencer_funcs
dcn351_funcs = { .root_clock_control = dcn35_root_clock_control, .set_long_vtotal = dcn35_set_long_vblank, .calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider, - .program_outstanding_updates = dcn32_program_outstanding_updates, .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 0b743669f23b..e8cc1bfa73f3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -506,7 +506,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); /* 1D LUT */ - if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) { + if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; @@ -521,7 +521,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id); } if (mpc->funcs->program_lut_mode) - mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id); + mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id); } /* Shaper */ @@ -669,11 +669,17 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; int mpcc_id = pipe_ctx->plane_res.hubp->inst; - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; + struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc; + struct mpc *mpc = dc->res_pool->mpc; bool result; const struct pwl_params *lut_params = NULL; bool rval; + if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { + dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a); + return true; + } + mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; // 1D LUT @@ -844,6 +850,13 @@ enum dc_status dcn401_enable_stream_timing( odm_slice_width, last_odm_slice_width); } + /* set DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { + if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); + } + } + /* HW program guide assume display already disable * by unplug sequence. OTG assume stop. 
*/ @@ -1004,8 +1017,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } else { - /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */ - dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst); dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } @@ -1063,7 +1074,6 @@ static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) r2 = test_pipe->plane_res.scl_data.recout; r2_r = r2.x + r2.width; r2_b = r2.y + r2.height; - split_pipe = test_pipe; /** * There is another half plane on same layer because of @@ -1097,6 +1107,58 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct } } +static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding) +{ + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + uint8_t i; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + pipe_ctx->clock_source->funcs->program_pix_clk( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + link_encoding, + &pipe_ctx->pll_settings); + break; + } + } +} + +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_backlight_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + + if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) { + disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING); + link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; + } else { + link_hwss->disable_link_output(link, link_res, signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + if (signal == SIGNAL_TYPE_EDP && + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) + link->dc->hwss.edp_power_control(link, false); + else if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->unlock_phy(dmcu); + + dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +} + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -1492,6 +1554,11 @@ void dcn401_optimize_bandwidth( pipe_ctx->dlg_regs.min_dst_y_next_start); } } + + /* update timeout thresholds */ + if (hubbub->funcs->program_timeout_thresholds) { + hubbub->funcs->program_timeout_thresholds(hubbub, &context->bw_ctx.bw.dcn.arb_regs); + } } void dcn401_fams2_global_control_lock(struct dc *dc, @@ -1669,7 +1736,7 @@ void dcn401_hardware_release(struct dc *dc) } } -void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) { struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; @@ -1695,6 +1762,9 @@ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, hubbub->funcs->wait_for_det_update) hubbub->funcs->wait_for_det_update(hubbub, 
dpp_pipe->plane_res.hubp->inst); } + } else { + if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update) + hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst); } } } @@ -1705,7 +1775,6 @@ void dcn401_interdependent_update_lock(struct dc *dc, unsigned int i = 0; struct pipe_ctx *pipe = NULL; struct timing_generator *tg = NULL; - bool pipe_unlocked[MAX_PIPES] = {0}; if (lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1719,48 +1788,91 @@ void dcn401_interdependent_update_lock(struct dc *dc, dc->hwss.pipe_control_lock(dc, pipe, true); } } else { - /* Unlock pipes based on the change in DET allocation instead of pipe index - * Prevents over allocation of DET during unlock process - * e.g. 2 pipe config with different streams with a max of 20 DET segments - * Before: After: - * - Pipe0: 10 DET segments - Pipe0: 12 DET segments - * - Pipe1: 10 DET segments - Pipe1: 8 DET segments - * If Pipe0 gets updated first, 22 DET segments will be allocated - */ + /* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; - int current_pipe_idx = i; if (!resource_is_pipe_type(pipe, OTG_MASTER) || !tg->funcs->is_tg_enabled(tg) || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { - pipe_unlocked[i] = true; continue; } - // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared - struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream); - - if (old_otg_master) - current_pipe_idx = old_otg_master->pipe_idx; - if (resource_calculate_det_for_stream(context, pipe) < - resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) { + if (dc->scratch.pipes_to_unlock_first[i]) { + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; dc->hwss.pipe_control_lock(dc, pipe, false); - pipe_unlocked[i] = true; - dcn401_wait_for_det_buffer_update(dc, context, pipe); + /* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/ + dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe); } } + /* Unlocking the rest of the pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (pipe_unlocked[i]) + if (dc->scratch.pipes_to_unlock_first[i]) continue; + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + continue; + } + dc->hwss.pipe_control_lock(dc, pipe, false); } } } +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx) +{ + /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that + * HUBP will properly fetch 3DLUT contents after unlock. + * + * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless + * of whether OTG lock is currently being held or not. 
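+ * The sequence below therefore raises the VUPDATE keepout, pre-enables 3DLUT fast load on each affected HUBP, unlocks the OTG and waits for the unlock to latch, re-asserts 3DLUT fast load, and only then releases the keepout.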
+ */ + struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; + struct pipe_ctx *odm_pipe, *mpc_pipe; + int i, wa_pipe_ct = 0; + + for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) { + for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) { + if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src + == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM + && mpc_pipe->plane_state->mcm_shaper_3dlut_setting + == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) { + wa_pipes[wa_pipe_ct++] = mpc_pipe; + } + } + } + + if (wa_pipe_ct > 0) { + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status) + pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false); + + for (i = 0; i < wa_pipe_ct; ++i) { + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); + } + + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false); + } else { + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + } +} + void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context) { @@ -1770,3 +1882,125 @@ void dcn401_program_outstanding_updates(struct dc *dc, if (hubbub->funcs->program_compbuf_segments) hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); } + +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct dc_link *link = pipe_ctx->stream->link; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + DC_LOGGER_INIT(dc->ctx->logger); + if (pipe_ctx->stream_res.stream_enc == NULL) { + pipe_ctx->stream = NULL; + return; + } + + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; + } + } + + /* by upper caller loop, parent pipe: pipe0, will be reset last. + * back end share by all pipes and will be disable only when disable + * parent pipe. 
+ */ + if (pipe_ctx->top_pipe == NULL) { + + dc->hwss.set_abm_immediate_disable(pipe_ctx); + + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); + if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) + pipe_ctx->stream_res.tg->funcs->set_odm_bypass( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, NULL); + /* TODO - convert symclk_ref_cnts for otg to a bit map to solve + * the case where the same symclk is shared across multiple otg + * instances + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link->phy_state.symclk_ref_cnts.otg = 0; + if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { + link_hwss->disable_link_output(link, + &pipe_ctx->link_res, pipe_ctx->stream->signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + /* reset DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst); + } + +/* + * In case of a dangling plane, setting this to NULL unconditionally + * causes failures during reset hw ctx where, if stream is NULL, + * it is expected that the pipe_ctx pointers to pipes and plane are NULL. + */ + pipe_ctx->stream = NULL; + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", + pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); +} + +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context) +{ + int i; + struct dce_hwseq *hws = dc->hwseq; + + /* Reset Back End*/ + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index a27e62081685..28a513dfc005 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -55,6 +55,10 @@ void dcn401_populate_mcm_luts(struct dc *dc, bool lut_bank_a); void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn401_disable_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx); bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable); @@ -81,7 +85,16 @@ void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); -void 
dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context); +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index a2ca07235c83..23e4f208152e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -77,14 +77,14 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .calc_vupdate_position = dcn10_calc_vupdate_position, .apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations, .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, + .set_backlight_level = dcn31_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .hardware_release = dcn401_hardware_release, .set_pipe = dcn21_set_pipe, .enable_lvds_link_output = dce110_enable_lvds_link_output, .enable_tmds_link_output = dce110_enable_tmds_link_output, .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dcn32_disable_link_output, + .disable_link_output = dcn401_disable_link_output, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .enable_phantom_streams = dcn32_enable_phantom_streams, @@ -93,13 +93,13 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .update_phantom_vp_position = dcn32_update_phantom_vp_position, .update_dsc_pg = dcn32_update_dsc_pg, .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, .fams2_global_control_lock = dcn401_fams2_global_control_lock, .fams2_update_config = dcn401_fams2_update_config, .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, + .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, }; static const struct hwseq_private_funcs dcn401_private_funcs = { @@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, @@ -136,8 +136,9 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, - .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, + 
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, + .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index ac9205625623..66fdc5805d0a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -365,8 +365,7 @@ struct hw_sequencer_funcs { void (*clear_status_bits)(struct dc *dc, unsigned int mask); bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *params); void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx); @@ -462,6 +461,7 @@ struct hw_sequencer_funcs { void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); + void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx); }; void color_space_to_black_color( @@ -504,6 +504,10 @@ void get_mclk_switch_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_cursor_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void set_p_state_switch_method( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 0ac675456979..22a5d4a03c98 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -182,6 +182,7 @@ struct hwseq_private_funcs { struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, bool lut_bank_a); + void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fa5edd03d004..b5afd8c3103d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -60,5 +60,7 @@ enum dc_status { }; char *dc_status_to_str(enum dc_status status); +char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding); +char *dc_color_depth_to_str(enum dc_color_depth color_depth); #endif /* _CORE_STATUS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bfb8b8502d20..8597e866bfe6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -215,6 +215,10 @@ struct resource_funcs { void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); + /* + * Get indicator of power from a context that went through full validation + */ + int (*get_power_profile)(const struct dc_state *context); }; struct audio_support{ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 67c32401893e..6c1d41c0f099 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -228,6 +228,7 @@ struct hubbub_funcs { void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void 
(*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); + void (*program_timeout_thresholds)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 3d4c8bd42b49..b74e18cc1e66 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -342,7 +342,11 @@ struct timing_generator_funcs { void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); - bool (*get_double_buffer_pending)(struct timing_generator *tg); + bool (*get_optc_double_buffer_pending)(struct timing_generator *tg); + bool (*get_otg_double_buffer_pending)(struct timing_generator *tg); + bool (*get_pipe_update_pending)(struct timing_generator *tg); + void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); + bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 72a8479e1f2d..f04292086c08 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -248,8 +248,7 @@ struct link_service { uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool (*edp_set_backlight_level)(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool (*edp_set_backlight_level_nits)(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d21ee9d12d26..e026c728042a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -48,6 +48,9 @@ #include "dm_helpers.h" #include "clk_mgr.h" + // Offset DPCD 050Eh == 0x5A +#define MST_HUB_ID_0x5A 0x5A + #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -692,6 +695,15 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) link->wa_flags.dpia_mst_dsc_always_on = true; + + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_vendor_specific_data[2] == MST_HUB_ID_0x5A && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) { + link->wa_flags.dpia_mst_dsc_always_on = true; + } } static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index c4e03482ba9a..41cab9ad6885 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2082,6 +2082,9 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link_settings->link_rate == LINK_RATE_LOW) skip_video_pattern = false; + if (stream->sink_patches.oled_optimize_display_on) + 
set_default_brightness_aux(link); + if (perform_link_training_with_retries(link_settings, skip_video_pattern, lt_attempts, @@ -2105,10 +2108,14 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); - if (link->dpcd_sink_ext_caps.bits.oled == 1) - msleep(bl_oled_enable_delay); - edp_backlight_enable_aux(link, true); + if (!stream->sink_patches.oled_optimize_display_on) { + set_default_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + edp_backlight_enable_aux(link, true); + } else { + edp_backlight_enable_aux(link, true); + } } return status; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index d78c8ec4de79..9dabaf682171 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -51,9 +51,10 @@ #include "dc_dmub_srv.h" #include "gpio_service_interface.h" +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + #define DC_LOGGER \ link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) @@ -1207,6 +1208,13 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } + + core_link_read_dpcd( + link, + DP_BRANCH_VENDOR_SPECIFIC_START, + (uint8_t *)link->dpcd_caps.branch_vendor_specific_data, + sizeof(link->dpcd_caps.branch_vendor_specific_data)); + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { union dp_dfp_cap_ext dfp_cap_ext; @@ -1625,7 +1633,11 @@ static bool retrieve_link_cap(struct dc_link *link) } /* Read DP tunneling information. 
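+ * DP tunneling data only applies to USB4 DPIA links, so the read below is gated on link->ep_type and a failed read is logged via dm_error.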
*/ - status = dpcd_get_tunneling_device_data(link); + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dpcd_get_tunneling_device_data(link); + if (status != DC_OK) + dm_error("%s: Read tunneling device data failed.\n", __func__); + } dpcd_set_source_specific_data(link); /* Sink may need to configure internals based on vendor, so allow some @@ -1842,6 +1854,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_FEC_CAPABILITY, &link->dpcd_caps.fec_cap.raw, sizeof(link->dpcd_caps.fec_cap.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_FEC_CAPABILITY) failed\n", __func__, __LINE__); + status = core_link_read_dpcd( link, DP_DSC_SUPPORT, @@ -1864,6 +1879,9 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_DSC_BRANCH_OVERALL_THROUGHPUT_0) failed\n", __func__, __LINE__); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); @@ -2055,6 +2073,14 @@ void detect_edp_sink_caps(struct dc_link *link) core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, &link->dpcd_caps.pr_info.max_deviation_line, sizeof(link->dpcd_caps.pr_info.max_deviation_line)); + + /* + * OLED Emission Rate info + */ + if (link->dpcd_sink_ext_caps.bits.emission_output) + core_link_read_dpcd(link, DP_SINK_EMISSION_RATE, + (uint8_t *)&link->dpcd_caps.edp_oled_emission_rate, + sizeof(link->dpcd_caps.edp_oled_emission_rate)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 6af42ba9885c..0d123e647652 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -59,12 +59,18 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) dpcd_dp_tun_data, sizeof(dpcd_dp_tun_data)); + if (status != DC_OK) + goto err; + status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, dpcd_topology_data, sizeof(dpcd_topology_data)); + if (status != DC_OK) + goto err; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = @@ -75,6 +81,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; +err: return status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 96bf135b6f05..48abeaa88678 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -221,21 +221,11 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link) &replay_error_status.raw, sizeof(replay_error_status.raw)); - link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = - replay_error_status.bits.LINK_CRC_ERROR; - 
link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = - replay_configuration.bits.DESYNC_ERROR_STATUS; - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = - replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; - - if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || - link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || - link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + if (replay_error_status.bits.LINK_CRC_ERROR || + replay_configuration.bits.DESYNC_ERROR_STATUS || + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) { bool allow_active; - if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR) - link->replay_settings.config.received_desync_error_hpd = 1; - if (link->replay_settings.config.force_disable_desync_error_check) return; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 27b881f947e8..754c895e1bfb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -272,7 +272,7 @@ void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { - fsleep(wait_in_micro_secs); + usleep_range_state(wait_in_micro_secs, wait_in_micro_secs, TASK_UNINTERRUPTIBLE); DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -1107,9 +1107,13 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_DOWNSPREAD_CTRL) failed\n", __func__, __LINE__); status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LANE_COUNT_SET) failed\n", __func__, __LINE__); if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { @@ -1125,12 +1129,19 @@ enum dc_status dpcd_set_link_settings( supported_link_rates, sizeof(supported_link_rates)); } status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, <_settings->link_settings.link_rate_set, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_RATE_SET) failed\n", __func__, __LINE__); } else { rate = get_dpcd_link_rate(<_settings->link_settings); status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + if (status != DC_OK) + DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__); } if (rate) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index b5cf75975fff..ccf8096dde29 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -412,7 +412,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( /* 5. 
check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3aa05a2be6c0..e0e3bb865359 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -157,31 +157,13 @@ bool edp_set_backlight_level_nits(struct dc_link *link, uint32_t backlight_millinits, uint32_t transition_time_in_ms) { - struct dpcd_source_backlight_set dpcd_backlight_set; - uint8_t backlight_control = isHDR ? 1 : 0; - if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - // OLEDs have no PWM, they can only use AUX - if (link->dpcd_sink_ext_caps.bits.oled == 1) - backlight_control = 1; - - *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; - *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - - - if (!link->dpcd_caps.panel_luminance_control) { - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *)(&dpcd_backlight_set), - sizeof(dpcd_backlight_set)) != DC_OK) - return false; - - if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, - &backlight_control, 1) != DC_OK) - return false; - } else { + // use internal backlight control if dmub capabilities are not present + if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && + !link->dc->caps.dmub_caps.aux_backlight_support) { uint8_t backlight_enable = 0; struct target_luminance_value *target_luminance = NULL; @@ -205,6 +187,24 @@ bool edp_set_backlight_level_nits(struct dc_link *link, (uint8_t *)(target_luminance), sizeof(struct target_luminance_value)) != DC_OK) return false; + } else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) { + struct dpcd_source_backlight_set dpcd_backlight_set; + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + uint8_t backlight_control = isHDR ? 
1 : 0; + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; } return true; @@ -519,11 +519,11 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) } bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp) + struct set_backlight_level_params *backlight_level_params) { struct dc *dc = link->ctx->dc; - + uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; + uint32_t frame_ramp = backlight_level_params->frame_ramp; DC_LOGGER_INIT(link->ctx->logger); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); @@ -544,10 +544,11 @@ bool edp_set_backlight_level(const struct dc_link *link, return false; } + backlight_level_params->frame_ramp = frame_ramp; + dc->hwss.set_backlight_level( pipe_ctx, - backlight_pwm_u16_16, - frame_ramp); + backlight_level_params); } return true; } @@ -940,8 +941,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream struct replay_context replay_context = { 0 }; unsigned int lineTimeInNs = 0; - - union replay_enable_and_configuration replay_config; + union replay_enable_and_configuration replay_config = { 0 }; union dpcd_alpm_configuration alpm_config; @@ -1168,9 +1168,6 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; if (link_res->hpo_dp_link_enc) { - if (link->wa_flags.disable_assr_for_uhbr) - return; - link_enc_index = link_res->hpo_dp_link_enc->inst; use_hpo_dp_link_enc = true; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 30dc8c24c008..bcfa6ac5d4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -36,8 +36,7 @@ bool edp_get_backlight_level_nits(struct dc_link *link, uint32_t *backlight_millinits_avg, uint32_t *backlight_millinits_peak); bool edp_set_backlight_level(const struct dc_link *link, - uint32_t backlight_pwm_u16_16, - uint32_t frame_ramp); + struct set_backlight_level_params *backlight_level_params); bool edp_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index b7a57f98553d..40757f20d73f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -202,6 +202,7 @@ struct dcn_optc_registers { uint32_t OPTC_CLOCK_CONTROL; uint32_t OPTC_WIDTH_CONTROL2; uint32_t OTG_PSTATE_REGISTER; + uint32_t OTG_PIPE_UPDATE_STATUS; }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -566,6 +567,12 @@ struct dcn_optc_registers { type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\ type OPTC_DOUBLE_BUFFER_PENDING;\ +#define TG_REG_FIELD_LIST_DCN2_0(type) \ + type OTG_FLIP_PENDING;\ + type OTG_DC_REG_UPDATE_PENDING;\ + type OTG_CURSOR_UPDATE_PENDING;\ + type OTG_VUPDATE_KEEPOUT_STATUS;\ + #define 
TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; @@ -600,6 +607,7 @@ struct dcn_optc_registers { struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) + TG_REG_FIELD_LIST_DCN2_0(uint8_t) TG_REG_FIELD_LIST_DCN3_2(uint8_t) TG_REG_FIELD_LIST_DCN3_5(uint8_t) TG_REG_FIELD_LIST_DCN401(uint8_t) @@ -607,6 +615,7 @@ struct dcn_optc_shift { struct dcn_optc_mask { TG_REG_FIELD_LIST(uint32_t) + TG_REG_FIELD_LIST_DCN2_0(uint32_t) TG_REG_FIELD_LIST_DCN3_2(uint32_t) TG_REG_FIELD_LIST_DCN3_5(uint32_t) TG_REG_FIELD_LIST_DCN401(uint32_t) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h index 364034b19028..928e110b95fb 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -43,7 +43,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SR(DWB_SOURCE_SELECT),\ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ @@ -53,6 +54,10 @@ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index abcd03d78668..4c95c0958612 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c @@ -271,6 +271,48 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c optc1->opp_count = opp_cnt; } +/* OTG status register that indicates OPTC update is pending */ +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OPTC_INPUT_GLOBAL_CONTROL, + OPTC_DOUBLE_BUFFER_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates OTG update is pending */ +bool optc3_get_otg_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t update_pending = 0; + + REG_GET(OTG_DOUBLE_BUFFER_CONTROL, + OTG_UPDATE_PENDING, + &update_pending); + + return (update_pending == 1); +} + +/* OTG status register that indicates surface update is pending */ +bool optc3_get_pipe_update_pending(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t flip_pending = 0; + uint32_t dc_update_pending = 0; + + REG_GET_2(OTG_PIPE_UPDATE_STATUS, + OTG_FLIP_PENDING, + &flip_pending, + OTG_DC_REG_UPDATE_PENDING, + &dc_update_pending); + + return (flip_pending == 1 || dc_update_pending == 1); +} + /** * optc3_set_timing_double_buffer() - DRR double buffering control * @@ -375,6 +417,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, 
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn30_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h index bda974d432ea..e2303f9eaf13 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h @@ -109,7 +109,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SR(DWB_SOURCE_SELECT) + SR(DWB_SOURCE_SELECT),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define DCN30_VTOTAL_REGS_SF(mask_sh) @@ -209,6 +210,7 @@ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ @@ -319,7 +321,11 @@ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn30_timing_generator_init(struct optc *optc1); @@ -356,4 +362,7 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); void optc3_tg_init(struct timing_generator *optc); void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); +bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc); +bool optc3_get_otg_update_pending(struct timing_generator *optc); +bool optc3_get_pipe_update_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index 1a22ae89fb55..d7a45ef2d01b 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c @@ -169,6 +169,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_hw_timing = optc1_get_hw_timing, .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn301_timing_generator_init(struct optc *optc1) diff --git 
a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 30b81a448ce2..fbbe86d00c2e 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -99,7 +99,8 @@ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SRI(OTG_CRC_CNTL2, OTG, inst),\ SR(DWB_SOURCE_SELECT),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -254,7 +255,11 @@ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn31_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h index 99c098e76116..0ff72b97b465 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h @@ -98,7 +98,8 @@ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI(OTG_DRR_CONTROL, OTG, inst) + SRI(OTG_DRR_CONTROL, OTG, inst),\ + SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ @@ -248,7 +249,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn314_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 00094f0e8470..c217f653b3c8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -297,18 +297,6 @@ static void optc32_set_drr( optc32_setup_manual_trigger(optc); } -bool optc32_get_double_buffer_pending(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t update_pending = 0; - - REG_GET(OPTC_INPUT_GLOBAL_CONTROL, - OPTC_DOUBLE_BUFFER_PENDING, - &update_pending); - - return (update_pending == 1); -} - static struct timing_generator_funcs dcn32_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -373,7 +361,9 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing 
= optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 665d7c52f67c..0b0964a9da74 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -177,7 +177,11 @@ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn32_timing_generator_init(struct optc *optc1); void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); @@ -185,6 +189,5 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); -bool optc32_get_double_buffer_pending(struct timing_generator *optc); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index d077e2392379..be749ab41dce 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -67,7 +67,11 @@ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ - SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index a5d6a7dca554..783ca9acc762 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -430,6 +430,35 @@ static void optc401_program_global_sync( REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout); } +static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + 
MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, optc1->vready_offset + 10, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, enable); + + return; +} + +static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + uint32_t lock_status = 0; + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, locked, + 1, 150000); + + REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &lock_status); + + if (lock_status != locked) + return false; + + return true; +} + static struct timing_generator_funcs dcn401_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -493,7 +522,11 @@ static struct timing_generator_funcs dcn401_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, - .get_double_buffer_pending = optc32_get_double_buffer_pending, + .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending, + .get_otg_double_buffer_pending = optc3_get_otg_update_pending, + .get_pipe_update_pending = optc3_get_pipe_update_pending, + .set_vupdate_keepout = optc401_set_vupdate_keepout, + .wait_update_lock_status = optc401_wait_update_lock_status, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h index bb13a645802d..1be89571986f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h @@ -159,7 +159,11 @@ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\ SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\ - SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh) + SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh) void dcn401_timing_generator_init(struct optc *optc1); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 53a5f4cb648c..e698543ec937 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -623,7 +623,7 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index 91da5cf85b69..035c6cfdaee5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -668,7 +668,7 @@ static struct link_encoder *dce110_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 162856c523e4..480a50967385 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -629,7 +629,7 @@ static struct link_encoder *dce112_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 621825a51f46..c63c59623433 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -706,7 +706,7 @@ static struct link_encoder *dce120_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index a73d3c6ef425..3d5113f010bb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -723,7 +723,7 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110) + if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 563c5eec83ff..770a380cc03d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -533,7 +533,6 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, /* raven smu dones't allow 0 disp clk, @@ -560,18 +559,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_stutter = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, -}; - static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -751,7 +738,7 @@ static struct link_encoder *dcn10_link_encoder_create( kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc10) + if (!enc10 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -1400,8 +1387,6 @@ static bool dcn10_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 
eea2b3b307cd..189d0c85872e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -706,7 +706,6 @@ static const struct resource_caps res_cap_nv14 = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, @@ -920,7 +919,7 @@ struct link_encoder *dcn20_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index fc54483b9104..d3d67d366523 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -600,7 +600,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -797,7 +796,7 @@ static struct link_encoder *dcn201_link_encoder_create( kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); struct dcn10_link_encoder *enc10; - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; enc10 = &enc20->enc10; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 347e6aaea582..021ba8ac5c8c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -610,7 +610,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = false, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, @@ -1298,7 +1297,7 @@ static struct link_encoder *dcn21_link_encoder_create( kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc21) + if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 5040a4c6ed18..cd31e4f16c14 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -711,7 +711,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, //No DMCU on DCN30 .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -927,7 +926,7 @@ static struct link_encoder *dcn30_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 7d04739c3ba1..a9816affd312 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -682,7 +682,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -883,7 +882,7 @@ static struct link_encoder *dcn301_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn301_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 5791b5cc2875..02af8b8f4d27 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -81,7 +81,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -893,7 +892,7 @@ static struct link_encoder *dcn302_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 63f0f882c861..7002a8dd358a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -82,7 +82,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -839,7 +838,7 @@ static struct link_encoder *dcn303_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index ac8cb20e2e3b..c16cf1c8f7f9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -858,7 +858,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -869,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = 
true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1093,7 +1092,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 169924d0a839..c0f48c78e968 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -876,7 +876,6 @@ static const struct dc_debug_options debug_defaults_drv = { .replay_skip_crtc_disabled = true, .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_dpp_power_gate = false, .disable_hubp_power_gate = false, @@ -889,7 +888,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 4096,/*upto true 4k*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, - .sanity_checks = true, + .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, @@ -1149,7 +1148,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 3f4b9dba4112..6c3295259a81 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -858,7 +858,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1091,7 +1090,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, @@ -1812,6 +1811,11 @@ static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } +static int dcn315_get_power_profile(const struct dc_state *context) +{ + return !context->bw_ctx.bw.dcn.clk.p_state_change_support; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; @@ -1840,6 +1844,7 @@ static struct resource_funcs dcn315_res_pool_funcs = { .update_bw_bounding_box = dcn315_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn315_get_panel_config_defaults, + .get_power_profile = dcn315_get_power_profile, }; static bool dcn315_resource_construct( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 
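dcn315_get_power_profile() above collapses the power-profile hint to a single bit: when the computed state cannot support P-state (DRAM) switching, report the higher-power profile. A sketch of the hook's shape, with invented names:

struct clock_state { int p_state_change_support; };

/* 0 = lowest profile; nonzero = memory cannot drop to a low-power
 * P-state for this display configuration */
static int get_power_profile(const struct clock_state *clk)
{
	return !clk->p_state_change_support;
}

The dcn401 variant further down refines the same hook by mapping the committed DRAM clock onto a DPM level instead of a boolean.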
5fd52c5fcee4..6edaaadcb173 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -853,7 +853,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, /*hw not support it*/ .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_DYNAMIC, @@ -1085,7 +1084,7 @@ static struct link_encoder *dcn31_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; dcn31_link_encoder_construct(enc20, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index a124ad9bd108..01d1a11d5545 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -689,7 +689,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore @@ -1039,7 +1038,7 @@ static struct link_encoder *dcn32_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1990,6 +1989,10 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned return 0; } + if (dc->caps.max_cab_allocation_bytes == 0) { + return 0xffffffff; + } + /* add 2 lines for worst case alignment */ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 7901792afb7b..86c6e5e8c42e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -1054,7 +1054,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBP */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 827a94f84f10..5cb74fd9cb7d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -686,7 +686,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1035,7 +1034,7 @@ static struct link_encoder *dcn321_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || 
enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 893a9d9ee870..6cc2960b6104 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -712,7 +712,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1074,7 +1073,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 70abd32ce2ad..d87e2641cda1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -692,7 +692,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1054,7 +1053,7 @@ static struct link_encoder *dcn35_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 9d56fbdcd06a..db93bac247c0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -685,7 +685,6 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, - .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, .pipe_split_policy = MPC_SPLIT_AVOID, @@ -1032,7 +1031,7 @@ static struct link_encoder *dcn401_link_encoder_create( struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) + if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; #undef REG_STRUCT @@ -1579,7 +1578,8 @@ static void dcn401_destroy_resource_pool(struct resource_pool **pool) } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -1688,6 +1688,45 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) } } +static int dcn401_get_power_profile(const struct dc_state *context) +{ + int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000; + int dpm_level = 0; + + for (int i = 0; i < 
context->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { + if (context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz == 0 || + uclk_mhz < context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + break; + if (uclk_mhz > context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) + dpm_level++; + } + + return dpm_level; +} + +static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans) +{ + unsigned int num_available_chans = 1; + + /* channels for MALL must be a power of 2 */ + while (num_chans > 1) { + num_available_chans = (num_available_chans << 1); + num_chans = (num_chans >> 1); + } + + /* cannot be odd */ + num_available_chans &= ~1; + + /* clamp to max available channels for MALL per ASIC */ + if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 16 ? 16 : num_available_chans; + } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) { + num_available_chans = num_available_chans > 8 ? 8 : num_available_chans; + } + + return num_available_chans; +} + static struct resource_funcs dcn401_res_pool_funcs = { .destroy = dcn401_destroy_resource_pool, .link_enc_create = dcn401_link_encoder_create, @@ -1714,6 +1753,7 @@ static struct resource_funcs dcn401_res_pool_funcs = { .prepare_mcache_programming = dcn401_prepare_mcache_programming, .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, + .get_power_profile = dcn401_get_power_profile, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1795,14 +1835,12 @@ static bool dcn401_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - /* total size = mall per channel * num channels * 1024 * 1024 */ - dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; /* Calculate the available MALL space */ - dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( dc, dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel * 1024 * 1024; dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; @@ -1867,6 +1905,7 @@ static bool dcn401_resource_construct( dc->config.prefer_easf = true; dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; + dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2132,6 +2171,7 @@ static bool dcn401_resource_construct( /* SPL */ spl_init_easf_filter_coeffs(); spl_init_blur_scale_coeffs(); + dc->caps.scl_caps.sharpener_support = true; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 514d1ce20df9..7c8d61db153d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -536,8 +536,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, 
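Two new dcn401 helpers above are worth a closer look. dcn401_get_power_profile() walks the memclk table, counting DPM levels until it passes the committed DRAM clock; dcn401_calc_num_avail_chans_for_mall() floors the channel count to a power of two (the shift loop computes 2^floor(log2(n))) and then clamps per ASIC revision. A compact sketch of the channel calculation, with illustrative limits:

/* floor num_chans to a power of two, then clamp to the ASIC's MALL limit */
static unsigned int mall_channels(unsigned int num_chans, unsigned int asic_max)
{
	unsigned int avail = 1;

	while (num_chans > 1) {	/* 6 -> 4, 8 -> 8, 3 -> 2 */
		avail <<= 1;
		num_chans >>= 1;
	}
	return avail > asic_max ? asic_max : avail;
}

In the patch, asic_max is 16 for GC 12.0.0 A0 and 8 for GC 12.0.1 A0.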
inst), \ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ - SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst) + SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \ + SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \ + SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst) /* HUBBUB */ #define HUBBUB_REG_LIST_DCN4_01_RI(id) \ @@ -609,7 +610,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); SR(DCHUBBUB_CLOCK_CNTL), \ SR(DCHUBBUB_SDPIF_CFG0), \ SR(DCHUBBUB_SDPIF_CFG1), \ - SR(DCHUBBUB_MEM_PWR_MODE_CTRL) + SR(DCHUBBUB_MEM_PWR_MODE_CTRL), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL1), \ + SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL2) /* DCCG */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index 014e8a296f0c..614276200aa0 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -848,13 +848,13 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, * surfaces based on policy setting */ if (!spl_is_yuv420(spl_in->basic_in.format) && - (spl_in->debug.sharpen_policy == SHARPEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_YUV)) return enable_isharp; else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) && - (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) + (spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) return enable_isharp; else if (!spl_in->is_fullscreen && - spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL) + spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL) return enable_isharp; /* @@ -868,6 +868,60 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, return enable_isharp; } +/* Calculate number of tap with adaptive scaling off */ +static void spl_get_taps_non_adaptive_scaler( + struct spl_scratch *spl_scratch, const struct spl_taps *in_taps) +{ + if (in_taps->h_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) + spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz), 8); + else + spl_scratch->scl_data.taps.h_taps = 4; + } else + spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) + spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps = 4; + } else + spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; + + if (in_taps->v_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) + spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( + spl_scratch->scl_data.ratios.vert_c, 2)), 8); + else + spl_scratch->scl_data.taps.v_taps_c = 4; + } else + spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; + + if (in_taps->h_taps_c == 0) { + if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) + spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( + spl_scratch->scl_data.ratios.horz_c), 8); + else + spl_scratch->scl_data.taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; + else + spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; + + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) + spl_scratch->scl_data.taps.h_taps = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) + spl_scratch->scl_data.taps.v_taps = 1; + if 
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + spl_scratch->scl_data.taps.h_taps_c = 1; + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + spl_scratch->scl_data.taps.v_taps_c = 1; + +} + /* Calculate optimal number of taps */ static bool spl_get_optimal_number_of_taps( int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch, @@ -882,8 +936,22 @@ static bool spl_get_optimal_number_of_taps( if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && - spl_scratch->scl_data.viewport.width > max_downscale_src_width) + spl_scratch->scl_data.viewport.width > max_downscale_src_width) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; return false; + } + + /* Disable adaptive scaler and sharpener when integer scaling is enabled */ + if (spl_in->scaling_quality.integer_scaling) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + *enable_easf_v = false; + *enable_easf_h = false; + *enable_isharp = false; + return true; + } /* Check if we are using EASF or not */ skip_easf = enable_easf(spl_in, spl_scratch); @@ -893,43 +961,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) { - if (in_taps->h_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1) - spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz), 8); - else - spl_scratch->scl_data.taps.h_taps = 4; - } else - spl_scratch->scl_data.taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1) - spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps = 4; - } else - spl_scratch->scl_data.taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1) - spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int( - spl_scratch->scl_data.ratios.vert_c, 2)), 8); - else - spl_scratch->scl_data.taps.v_taps_c = 4; - } else - spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1) - spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil( - spl_scratch->scl_data.ratios.horz_c), 8); - else - spl_scratch->scl_data.taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1; - else - spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c; - } else { + if (skip_easf) + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + else { if (spl_is_yuv420(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; spl_scratch->scl_data.taps.v_taps = 6; @@ -954,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( else lb_config = LB_MEMORY_CONFIG_0; // Determine max vtap support by calculating how much line buffer can fit - spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, + spl_in->callbacks.spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data, lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - 
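The newly factored spl_get_taps_non_adaptive_scaler() applies one rule per dimension, matching the programming-guide comment nearby: taps = min(2 * ceil(ratio), 8) when downscaling, 4 when upscaling, and 1 when the ratio is exactly 1:1; chroma horizontal taps must additionally be 1 or even, so odd requests are decremented. A floating-point paraphrase of the fixed-point logic:

#include <math.h>

/* ratio > 1.0 means downscaling (source larger than destination) */
static int default_tap_count(double ratio)
{
	if (ratio == 1.0)
		return 1;	/* identity: filtering can be bypassed */
	if (ratio > 1.0) {
		int taps = 2 * (int)ceil(ratio);
		return taps < 8 ? taps : 8;
	}
	return 4;		/* upscaling */
}

The two early exits added to spl_get_optimal_number_of_taps() route both the over-downscale case and integer scaling through this non-adaptive path, with EASF and the sharpener forced off.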
MAX(CEILING(V_RATIO,1)-2, 0), 8) */ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2) @@ -1590,7 +1624,8 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness, scale_to_sharpness_policy); - dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); + memcpy(dscl_prog_data->isharp_delta, spl_get_pregen_filter_isharp_1D_lut(setup), + sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE); dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; dscl_prog_data->isharp_en = 1; // ISHARP_EN @@ -1753,12 +1788,12 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) // Clamp spl_clamp_viewport(&spl_scratch.scl_data.viewport); - if (!res) - return res; - // Save all calculated parameters in dscl_prog_data structure to program hw registers spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp); + if (!res) + return res; + if (spl_in->lls_pref == LLS_PREF_YES) { if (spl_in->is_hdr_on) setup = HDR_L; diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h index afcc66206ca2..89af91e19b6c 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h @@ -7,7 +7,6 @@ #include "dc_spl_types.h" -#define ISHARP_LUT_TABLE_SIZE 32 const uint32_t *spl_get_filter_isharp_1D_lut_0(void); const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void); const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void); diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 2a74ff5fdfdb..55d557df4aa5 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -5,10 +5,8 @@ #ifndef __DC_SPL_TYPES_H__ #define __DC_SPL_TYPES_H__ +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef SPL_ASSERT -#define SPL_ASSERT(_bool) ((void *)0) -#endif #include "spl_fixpt31_32.h" // fixed31_32 and related functions #include "spl_custom_float.h" // custom float and related functions @@ -252,6 +250,7 @@ enum isharp_en { ISHARP_DISABLE, ISHARP_ENABLE }; +#define ISHARP_LUT_TABLE_SIZE 32 // Below struct holds values that can be directly used to program // hardware registers. No conversion/clamping is required struct dscl_prog_data { @@ -402,7 +401,7 @@ struct dscl_prog_data { uint32_t isharp_nl_en; // ISHARP_NL_EN ? 
TODO:check this struct isharp_lba isharp_lba; // ISHARP_LBA struct isharp_fmt isharp_fmt; // ISHARP_FMT - const uint32_t *isharp_delta; + uint32_t isharp_delta[ISHARP_LUT_TABLE_SIZE]; struct isharp_nldelta_sclip isharp_nldelta_sclip; // ISHARP_NLDELTA_SCLIP /* blur and scale filter */ const uint16_t *filter_blur_scale_v; @@ -498,7 +497,7 @@ enum scale_to_sharpness_policy { SCALE_TO_SHARPNESS_ADJ_YUV = 1, SCALE_TO_SHARPNESS_ADJ_ALL = 2 }; -struct spl_funcs { +struct spl_callbacks { void (*spl_calc_lb_num_partitions) (bool alpha_en, const struct spl_scaler_data *scl_data, @@ -510,7 +509,6 @@ struct spl_funcs { struct spl_debug { int visual_confirm_base_offset; int visual_confirm_dpp_offset; - enum sharpen_policy sharpen_policy; enum scale_to_sharpness_policy scale_to_sharpness_policy; }; @@ -520,7 +518,7 @@ struct spl_in { // Basic slice information int odm_slice_index; // ODM Slice Index using get_odm_split_index struct spl_taps scaling_quality; // Explicit Scaling Quality - struct spl_funcs *funcs; + struct spl_callbacks callbacks; // Inputs for isharp and EASF struct adaptive_sharpness adaptive_sharpness; // Adaptive Sharpness enum linear_light_scaling lls_pref; // Linear Light Scaling @@ -532,6 +530,7 @@ struct spl_in { int h_active; int v_active; int sdr_white_level_nits; + enum sharpen_policy sharpen_policy; }; // end of SPL inputs diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h index 5696dafd0894..a6f6132df241 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h @@ -5,21 +5,26 @@ #ifndef SPL_DEBUG_H #define SPL_DEBUG_H -#ifdef SPL_ASSERT -#undef SPL_ASSERT -#endif -#define SPL_ASSERT(b) +#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB) +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + kgdb_breakpoint(); \ + } \ +} while (0) +#else +#define SPL_ASSERT_CRITICAL(expr) do { \ + if (WARN_ON(!(expr))) { \ + ; \ + } \ +} while (0) +#endif /* CONFIG_HAVE_KGDB || CONFIG_KGDB */ -#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0) +#if defined(CONFIG_DEBUG_KERNEL_DC) +#define SPL_ASSERT(expr) SPL_ASSERT_CRITICAL(expr) +#else +#define SPL_ASSERT(expr) WARN_ON(!(expr)) +#endif /* CONFIG_DEBUG_KERNEL_DC */ -#ifdef SPL_DALMSG -#undef SPL_DALMSG -#endif -#define SPL_DALMSG(b) - -#ifdef SPL_DAL_ASSERT_MSG -#undef SPL_DAL_ASSERT_MSG -#endif -#define SPL_DAL_ASSERT_MSG(b, m) +#define SPL_BREAK_TO_DEBUGGER() SPL_ASSERT(0) #endif // SPL_DEBUG_H diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c index a95565df5487..131f1e3949d3 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c @@ -22,14 +22,14 @@ static inline unsigned long long abs_i64( * result = dividend / divisor * *remainder = dividend % divisor */ -static inline unsigned long long complete_integer_division_u64( +static inline unsigned long long spl_complete_integer_division_u64( unsigned long long dividend, unsigned long long divisor, unsigned long long *remainder) { unsigned long long result; - ASSERT(divisor); + SPL_ASSERT(divisor); result = spl_div64_u64_rem(dividend, divisor, remainder); @@ -60,10 +60,10 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den /* determine integer part */ - unsigned long long res_value = complete_integer_division_u64( + unsigned long long res_value = 
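isharp_delta changes from a borrowed const pointer into a 32-entry array owned by dscl_prog_data, and spl_set_isharp_data() now copies the pregenerated LUT into it. Presumably this decouples the programming data from the shared 1D-LUT buffer, which can be rebuilt for another stream while this structure is still pending register programming. The ownership change in miniature, with invented names:

#include <stdint.h>
#include <string.h>

#define LUT_SIZE 32	/* mirrors ISHARP_LUT_TABLE_SIZE */

struct prog_data {
	uint32_t isharp_delta[LUT_SIZE];	/* was: const uint32_t * */
};

static void set_isharp_lut(struct prog_data *pd, const uint32_t *pregen_lut)
{
	/* deep copy: later writes to the shared LUT cannot alias into
	 * data already queued for the hardware */
	memcpy(pd->isharp_delta, pregen_lut, sizeof(pd->isharp_delta));
}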
spl_complete_integer_division_u64( arg1_value, arg2_value, &remainder); - ASSERT(res_value <= LONG_MAX); + SPL_ASSERT(res_value <= (unsigned long long)LONG_MAX); /* determine fractional part */ { @@ -85,7 +85,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den { unsigned long long summand = (remainder << 1) >= arg2_value; - ASSERT(res_value <= LLONG_MAX - summand); + SPL_ASSERT(res_value <= (unsigned long long)LLONG_MAX - summand); res_value += summand; } @@ -118,19 +118,19 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed res.value = arg1_int * arg2_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg1_int * arg2_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg2_int * arg1_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -139,7 +139,7 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -163,17 +163,17 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) res.value = arg_int * arg_int; - ASSERT(res.value <= (long long)LONG_MAX); + SPL_ASSERT(res.value <= (long long)LONG_MAX); res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART; tmp = arg_int * arg_fra; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -182,7 +182,7 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg) tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + (tmp >= (unsigned long long)spl_fixpt_half.value); - ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); + SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -196,7 +196,7 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg) * Good idea to use Newton's method */ - ASSERT(arg.value); + SPL_ASSERT(arg.value); return spl_fixpt_from_fraction( spl_fixpt_one.value, @@ -286,7 +286,7 @@ struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg) * * Calculated as Taylor series. 
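Every ASSERT-to-SPL_ASSERT conversion in this file guards a 64-bit operation before performing it. The addition guard used by spl_fixpt_add() (and, with the signs inverted, by spl_fixpt_sub()), extracted into a standalone sketch:

#include <assert.h>
#include <limits.h>

static long long checked_add(long long a, long long b)
{
	/* a >= 0: b must not exceed the headroom LLONG_MAX - a;
	 * a <  0: b must not fall below LLONG_MIN - a */
	assert((a >= 0 && LLONG_MAX - a >= b) ||
	       (a < 0 && LLONG_MIN - a <= b));
	return a + b;
}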
*/ -static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) +static struct spl_fixed31_32 spl_fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg) { unsigned int n = 9; @@ -295,7 +295,7 @@ static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed3 n + 1); /* TODO find correct res */ - ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); + SPL_ASSERT(spl_fixpt_lt(arg, spl_fixpt_one)); do res = spl_fixpt_add( @@ -337,22 +337,22 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg) spl_fixpt_ln2, m)); - ASSERT(m != 0); + SPL_ASSERT(m != 0); - ASSERT(spl_fixpt_lt( + SPL_ASSERT(spl_fixpt_lt( spl_fixpt_abs(r), spl_fixpt_one)); if (m > 0) return spl_fixpt_shl( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), (unsigned char)m); else return spl_fixpt_div_int( - fixed31_32_exp_from_taylor_series(r), + spl_fixed31_32_exp_from_taylor_series(r), 1LL << -m); } else if (arg.value != 0) - return fixed31_32_exp_from_taylor_series(arg); + return spl_fixed31_32_exp_from_taylor_series(arg); else return spl_fixpt_one; } @@ -364,7 +364,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) struct spl_fixed31_32 error; - ASSERT(arg.value > 0); + SPL_ASSERT(arg.value > 0); /* TODO if arg is negative, return NaN */ /* TODO if arg is zero, return -INF */ @@ -396,7 +396,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg) * part in 32 bits. It is used in hw programming (scaler) */ -static inline unsigned int ux_dy( +static inline unsigned int spl_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits) @@ -415,13 +415,13 @@ static inline unsigned int ux_dy( return result | fractional_part; } -static inline unsigned int clamp_ux_dy( +static inline unsigned int spl_clamp_ux_dy( long long value, unsigned int integer_bits, unsigned int fractional_bits, unsigned int min_clamp) { - unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); + unsigned int truncated_val = spl_ux_dy(value, integer_bits, fractional_bits); if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) return (1 << (integer_bits + fractional_bits)) - 1; @@ -433,40 +433,40 @@ static inline unsigned int clamp_ux_dy( unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 3, 19); + return spl_ux_dy(arg.value, 3, 19); } unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 2, 19); + return spl_ux_dy(arg.value, 2, 19); } unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg) { - return ux_dy(arg.value, 0, 19); + return spl_ux_dy(arg.value, 0, 19); } unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 14, 1); + return spl_clamp_ux_dy(arg.value, 0, 14, 1); } unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg) { - return clamp_ux_dy(arg.value, 0, 10, 1); + return spl_clamp_ux_dy(arg.value, 0, 10, 1); } int spl_fixpt_s4d19(struct spl_fixed31_32 arg) { if (arg.value < 0) - return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19); + return -(int)spl_ux_dy(spl_fixpt_abs(arg).value, 4, 19); else - return ux_dy(arg.value, 4, 19); + return spl_ux_dy(arg.value, 4, 19); } struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h 
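The renamed spl_ux_dy() packs a 31.32 fixed-point value into the unsigned UxDy register formats (u4d19, u2d19, and so on): x integer bits followed by y fractional bits, truncating whatever does not fit. A simplified sketch assuming the 31.32 layout used throughout this file:

#include <stdint.h>

#define FRAC_BITS 32	/* fixed 31.32: the low 32 bits are fractional */

static unsigned int ux_dy(long long value, unsigned int x, unsigned int y)
{
	unsigned int integer = ((unsigned long long)value >> FRAC_BITS) & ((1u << x) - 1);
	unsigned int fraction = (uint32_t)value >> (FRAC_BITS - y);

	return (integer << y) | fraction;
}

/* e.g. spl_fixpt_u2d19(arg) corresponds to ux_dy(arg.value, 2, 19) */

The clamped variant additionally saturates to all-ones when the value exceeds what x integer bits can represent, and enforces a caller-supplied minimum for small nonzero inputs.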
b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h index 8a045e2f8699..ed2647f9a099 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h @@ -5,11 +5,8 @@ #ifndef __SPL_FIXED31_32_H__ #define __SPL_FIXED31_32_H__ -#include "os_types.h" +#include "spl_debug.h" #include "spl_os_types.h" // swap -#ifndef ASSERT -#define ASSERT(_bool) ((void *)0) -#endif #ifndef LLONG_MAX #define LLONG_MAX 9223372036854775807ll @@ -194,7 +191,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp( */ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift) { - ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || + SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift)))); arg.value = arg.value << shift; @@ -231,7 +228,7 @@ static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || + SPL_ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); res.value = arg1.value + arg2.value; @@ -256,7 +253,7 @@ static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, st { struct spl_fixed31_32 res; - ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || + SPL_ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); res.value = arg1.value - arg2.value; @@ -448,7 +445,7 @@ static inline int spl_fixpt_round(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_half.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -469,7 +466,7 @@ static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg) const long long summand = spl_fixpt_one.value - spl_fixpt_epsilon.value; - ASSERT(LLONG_MAX - (long long)arg_value >= summand); + SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; @@ -504,7 +501,7 @@ static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg bool negative = arg.value < 0; if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) { - ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); + SPL_ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); return arg; } diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h index 709706ed4f2c..2e6ba71960ac 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h @@ -6,6 +6,8 @@ #ifndef _SPL_OS_TYPES_H_ #define _SPL_OS_TYPES_H_ +#include "spl_debug.h" + #include <linux/slab.h> #include <linux/kgdb.h> #include <linux/kref.h> @@ -18,7 +20,6 @@ * general debug capabilities * */ -#define SPL_BREAK_TO_DEBUGGER() ASSERT(0) static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index fe5b6f7a3eb1..b353c4ceb60d 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -301,6 +301,7 @@ struct dmub_srv_hw_params { bool disallow_phy_access; bool disable_sldo_opt; bool enable_non_transparent_setconfig; + bool 
lower_hbr3_phy_ssc; }; /** @@ -570,6 +571,14 @@ struct dmub_notification { }; }; +/* enum dmub_ips_mode - IPS mode identifier */ +enum dmub_ips_mode { + DMUB_IPS_MODE_IPS1_MAX = 0, + DMUB_IPS_MODE_IPS2, + DMUB_IPS_MODE_IPS1_RCG, + DMUB_IPS_MODE_IPS1_ONO2_ON +}; + /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index ebcf68bfae2b..b800a507d1e0 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,11 @@ #pragma pack(push, 1) #define ABM_NUM_OF_ACE_SEGMENTS 5 +/** + * Debug FW state offset + */ +#define DMUB_DEBUG_FW_STATE_OFFSET 0x300 + union abm_flags { struct { /** @@ -490,6 +495,7 @@ struct dmub_feature_caps { uint8_t gecc_enable; uint8_t replay_supported; uint8_t replay_reserved[3]; + uint8_t abm_aux_backlight_support; }; struct dmub_visual_confirm_color { @@ -689,7 +695,8 @@ union dmub_fw_boot_options { uint32_t ips_disable: 3; /* options to disable ips support*/ uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ - uint32_t reserved : 7; /**< reserved */ + uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ + uint32_t reserved : 6; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -721,6 +728,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__INVALID = 0, DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, + DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */ }; @@ -747,7 +755,8 @@ union dmub_shared_state_ips_driver_signals { uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ - uint32_t reserved_bits : 28; /**< Reversed bits */ + uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */ + uint32_t reserved_bits : 27; /**< Reversed bits */ } bits; uint32_t all; }; @@ -757,6 +766,14 @@ union dmub_shared_state_ips_driver_signals { */ #define DMUB_SHARED_STATE__IPS_FW_VERSION 1 +struct dmub_shared_state_debug_setup { + union { + struct { + uint32_t exclude_points[62]; + } profile_mode; + }; +}; + /** * struct dmub_shared_state_ips_fw - Firmware state for IPS. */ @@ -809,6 +826,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_feature_common common; /**< Generic data */ struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ + struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ } data; /**< Shared state data. 
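Both dmub_fw_boot_options and dmub_shared_state_ips_driver_signals gain their new flag the same way: one bit is carved out of the trailing reserved field (7 to 6, and 28 to 27, respectively) so the union still overlays exactly one 32-bit register word. The invariant can be pinned down with a static assertion; a hypothetical mirror of the pattern:

#include <stdint.h>
#include <assert.h>

union boot_options {
	struct {
		uint32_t disable_sldo_opt : 1;
		uint32_t enable_non_transparent_setconfig : 1;
		uint32_t lower_hbr3_phy_ssc : 1;	/* new flag */
		uint32_t reserved : 29;			/* shrunk to compensate */
	} bits;
	uint32_t all;	/* raw word written to the scratch register */
};

static_assert(sizeof(union boot_options) == sizeof(uint32_t),
	      "new flags must come out of the reserved field");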
*/ }; /* 256-bytes, fixed */ @@ -1051,11 +1069,110 @@ enum dmub_gpint_command { DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, /** + * DESC: Set IPS residency measurement + * ARGS: 0 - Disable ips measurement + * 1 - Enable ips measurement + */ + DMUB_GPINT__IPS_RESIDENCY = 121, + + /** * DESC: Enable measurements for various task duration * ARGS: 0 - Disable measurement * 1 - Enable measurement */ DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124, + + /** + * DESC: Gets IPS1 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125, + + /** + * DESC: Gets IPS2 histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126, + + /** + * DESC: Gets IPS residency + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in milli-percent. + */ + DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127, + + /** + * DESC: Gets IPS1_RCG histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128, + + /** + * DESC: Gets IPS1_ONO2_ON histogram counts + * ARGS: Bucket index + * RETURN: Total count for the bucket + */ + DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129, + + /** + * DESC: Gets IPS entry counter during residency measurement + * ARGS: 0 - Return IPS1 entry counts + * 1 - Return IPS2 entry counts + * 2 - Return IPS1_RCG entry counts + * 3 - Return IPS2_ONO2_ON entry counts + * RETURN: Entry counter for selected IPS mode + */ + DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - lower 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131, + + /** + * DESC: Gets IPS inactive residency in microseconds + * ARGS: 0 - Return IPS1_MAX residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total inactive residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132, + + /** + * DESC: Gets IPS residency in microseconds + * ARGS: 0 - Return IPS1 residency + * 1 - Return IPS2 residency + * 2 - Return IPS1_RCG residency + * 3 - Return IPS1_ONO2_ON residency + * RETURN: Total residency in microseconds - upper 32 bits + */ + DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI = 133, + /** + * DESC: Setup debug configs. + */ + DMUB_GPINT__SETUP_DEBUG_MODE = 136, }; /** @@ -1306,9 +1423,10 @@ enum dmub_out_cmd_type { /* DMUB_CMD__DPIA command sub-types. */ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, - DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, + DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, // will be replaced by DPIA_SET_CONFIG_REQUEST DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3, + DMUB_CMD__DPIA_SET_CONFIG_REQUEST = 4, }; /* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. 
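The residency duration commands come in LO/HI pairs (124/133 for active residency, 131/132 for inactive) because each GPINT reply carries only 32 bits; a caller issues the command twice and stitches the halves into a 64-bit microsecond count. A sketch over a hypothetical mailbox function:

#include <stdint.h>

enum { GET_IPS_RESIDENCY_US_LO = 124, GET_IPS_RESIDENCY_US_HI = 133 };

/* hypothetical stand-in for the real GPINT transaction */
static uint32_t gpint_send(int command, uint32_t ips_mode)
{
	(void)command;
	(void)ips_mode;
	return 0;
}

static uint64_t ips_residency_us(uint32_t ips_mode)
{
	uint64_t lo = gpint_send(GET_IPS_RESIDENCY_US_LO, ips_mode);
	uint64_t hi = gpint_send(GET_IPS_RESIDENCY_US_HI, ips_mode);

	return (hi << 32) | lo;
}

A careful reader would fetch one half again and retry if it changed, since the counter can tick between the two transactions.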
*/ @@ -2097,7 +2215,7 @@ struct dmub_rb_cmd_dig1_dpia_control { }; /** - * SET_CONFIG Command Payload + * SET_CONFIG Command Payload (deprecated) */ struct set_config_cmd_payload { uint8_t msg_type; /* set config message type */ @@ -2105,7 +2223,7 @@ struct set_config_cmd_payload { }; /** - * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. (deprecated) */ struct dmub_cmd_set_config_control_data { struct set_config_cmd_payload cmd_pkt; @@ -2114,6 +2232,17 @@ struct dmub_cmd_set_config_control_data { }; /** + * SET_CONFIG Request Command Payload + */ +struct set_config_request_cmd_payload { + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned in case of error */ + uint8_t msg_type; /* set config message type */ + uint8_t reserved; + uint32_t msg_data; /* set config message data */ +}; + +/** * DMUB command structure for SET_CONFIG command. */ struct dmub_rb_cmd_set_config_access { @@ -2122,6 +2251,14 @@ struct dmub_rb_cmd_set_config_access { }; /** + * DMUB command structure for SET_CONFIG request command. + */ +struct dmub_rb_cmd_set_config_request { + struct dmub_cmd_header header; /* header */ + struct set_config_request_cmd_payload payload; /* set config request payload */ +}; + +/** * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ struct dmub_cmd_mst_alloc_slots_control_data { @@ -4290,6 +4427,24 @@ struct dmub_rb_cmd_abm_set_pipe { }; /** + * Type of backlight control method to be used by ABM module + */ +enum dmub_backlight_control_type { + /** + * PWM Backlight control + */ + DMU_BACKLIGHT_CONTROL_PWM = 0, + /** + * VESA Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_VESA_AUX = 1, + /** + * AMD DPCD Aux-based backlight control + */ + DMU_BACKLIGHT_CONTROL_AMD_AUX = 2, +}; + +/** * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command. */ struct dmub_cmd_abm_set_backlight_data { @@ -4316,9 +4471,42 @@ struct dmub_cmd_abm_set_backlight_data { uint8_t panel_mask; /** + * AUX HW Instance. + */ + uint8_t aux_inst; + + /** * Explicit padding to 4 byte boundary. */ - uint8_t pad[2]; + uint8_t pad[1]; + + /** + * Backlight control type. + * Value 0 is PWM backlight control. + * Value 1 is VAUX backlight control. + * Value 2 is AMD DPCD AUX backlight control. + */ + enum dmub_backlight_control_type backlight_control_type; + + /** + * Minimum luminance in nits. + */ + uint32_t min_luminance; + + /** + * Maximum luminance in nits. + */ + uint32_t max_luminance; + + /** + * Minimum backlight in pwm. + */ + uint32_t min_backlight_pwm; + + /** + * Maximum backlight in pwm. 
+ */ + uint32_t max_backlight_pwm; }; /** @@ -5022,7 +5210,34 @@ struct dmub_rb_cmd_get_usbc_cable_id { enum dmub_cmd_secure_display_type { DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0, /* test command to only check if inbox message works */ DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE, - DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY + DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE, + DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY +}; + +#define MAX_ROI_NUM 2 + +struct dmub_cmd_roi_info { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + uint8_t otg_id; + uint8_t phy_id; +}; + +struct dmub_cmd_roi_window_ctl { + uint16_t x_start; + uint16_t x_end; + uint16_t y_start; + uint16_t y_end; + bool enable; +}; + +struct dmub_cmd_roi_ctl_info { + uint8_t otg_id; + uint8_t phy_id; + struct dmub_cmd_roi_window_ctl roi_ctl[MAX_ROI_NUM]; }; /** @@ -5033,14 +5248,8 @@ struct dmub_rb_cmd_secure_display { /** * Data passed from driver to dmub firmware. */ - struct dmub_cmd_roi_info { - uint16_t x_start; - uint16_t x_end; - uint16_t y_start; - uint16_t y_end; - uint8_t otg_id; - uint8_t phy_id; - } roi_info; + struct dmub_cmd_roi_info roi_info; + struct dmub_cmd_roi_ctl_info mul_roi_ctl; }; /** @@ -5318,7 +5527,11 @@ union dmub_rb_cmd { /** * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. */ - struct dmub_rb_cmd_set_config_access set_config_access; + struct dmub_rb_cmd_set_config_access set_config_access; // (deprecated) + /** + * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + */ + struct dmub_rb_cmd_set_config_request set_config_request; /** * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 2ccad79053c5..e5e77bd3c31e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -426,6 +426,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; + boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -463,7 +464,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset; uint32_t is_traceport_enabled, is_cw6_enabled; if (!dmub || !diag_data) @@ -513,9 +514,6 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); diag_data->is_dmcub_soft_reset = is_soft_reset; - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - diag_data->is_dmcub_secure_reset = is_sec_reset; - REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); diag_data->is_traceport_en = is_traceport_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index db16066bc893..a3f3ff5d49ac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -497,6 +497,7 @@ enum dmub_status const struct dmub_fw_meta_info *fw_info; uint32_t 
fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE; uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 }; if (!dmub->sw_init) @@ -514,6 +515,7 @@ enum dmub_status fw_state_size = fw_info->fw_region_size; trace_buffer_size = fw_info->trace_buffer_size; + shared_state_size = fw_info->shared_state_size; /** * If DM didn't fill in a version, then fill it in based on @@ -534,7 +536,7 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; - window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE; + window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index aee5170f5fb2..de8f3cfed6c8 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -164,18 +164,19 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; -#define DP_SOURCE_SEQUENCE 0x30c -#define DP_SOURCE_TABLE_REVISION 0x310 -#define DP_SOURCE_PAYLOAD_SIZE 0x311 -#define DP_SOURCE_SINK_CAP 0x317 -#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 -#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 -#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E -#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F -#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SOURCE_SEQUENCE 0x30C +#define DP_SOURCE_TABLE_REVISION 0x310 +#define DP_SOURCE_PAYLOAD_SIZE 0x311 +#define DP_SOURCE_SINK_CAP 0x317 +#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 +#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 +#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E +#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F +#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 #define DP_SINK_PR_REPLAY_STATUS 0x378 #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A +#define DP_SINK_EMISSION_RATE 0x37E /* Remove once drm_dp_helper.h is updated upstream */ #ifndef DP_TOTAL_LTTPR_CNT diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index 02c23b04d34b..058f882d5bdd 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -52,10 +52,6 @@ void update_surface_trace( void post_surface_trace(struct dc *dc); -void context_timing_trace( - struct dc *dc, - struct resource_context *res_ctx); - void context_clock_trace( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index a48d564d1660..4d68c1c6e210 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -61,11 +61,13 @@ #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) -#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) 
pr_debug("[AutoDPMTest]: "__VA_ARGS__) #define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__) +#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__) +#define DC_LOG_REGISTER_READ(...) pr_debug("[REGISTER_READ]: "__VA_ARGS__) +#define DC_LOG_REGISTER_WRITE(...) pr_debug("[REGISTER_WRITE]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 3699e633801d..a71df052cf25 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1399,71 +1399,6 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb, pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); } -/* todo: all these scale_gamma functions are inherently the same but - * take different structures as params or different format for ramp - * values. We could probably implement it in a more generic fashion - */ -static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, - const struct regamma_ramp *ramp, - struct dividers dividers) -{ - unsigned short max_driver = 0xFFFF; - unsigned short max_os = 0xFF00; - unsigned short scaler = max_os; - uint32_t i; - struct pwl_float_data *rgb = pwl_rgb; - struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1; - - i = 0; - do { - if (ramp->gamma[i] > max_os || - ramp->gamma[i + 256] > max_os || - ramp->gamma[i + 512] > max_os) { - scaler = max_driver; - break; - } - i++; - } while (i != GAMMA_RGB_256_ENTRIES); - - i = 0; - do { - rgb->r = dc_fixpt_from_fraction( - ramp->gamma[i], scaler); - rgb->g = dc_fixpt_from_fraction( - ramp->gamma[i + 256], scaler); - rgb->b = dc_fixpt_from_fraction( - ramp->gamma[i + 512], scaler); - - ++rgb; - ++i; - } while (i != GAMMA_RGB_256_ENTRIES); - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider1); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider1); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider1); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider2); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider2); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider2); - - ++rgb; - - rgb->r = dc_fixpt_mul(rgb_last->r, - dividers.divider3); - rgb->g = dc_fixpt_mul(rgb_last->g, - dividers.divider3); - rgb->b = dc_fixpt_mul(rgb_last->b, - dividers.divider3); -} - /* * RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here * Input is evenly distributed in the output color space as specified in @@ -1663,106 +1598,6 @@ static bool calculate_interpolated_hardware_curve( return true; } -/* The "old" interpolation uses a complicated scheme to build an array of - * coefficients while also using an array of 0-255 normalized to 0-1 - * Then there's another loop using both of the above + new scaled user ramp - * and we concatenate them. It also searches for points of interpolation and - * uses enums for positions. 
- * - * This function uses a different approach: - * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255 - * To find index for hwX , we notice the following: - * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1 - * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT - * - * Once the index is known, combined Y is simply: - * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index) - * - * We should switch to this method in all cases, it's simpler and faster - * ToDo one day - for now this only applies to ADL regamma to avoid regression - * for regular use cases (sRGB and PQ) - */ -static void interpolate_user_regamma(uint32_t hw_points_num, - struct pwl_float_data *rgb_user, - bool apply_degamma, - struct dc_transfer_func_distributed_points *tf_pts) -{ - uint32_t i; - uint32_t color = 0; - int32_t index; - int32_t index_next; - struct fixed31_32 *tf_point; - struct fixed31_32 hw_x; - struct fixed31_32 norm_factor = - dc_fixpt_from_int(255); - struct fixed31_32 norm_x; - struct fixed31_32 index_f; - struct fixed31_32 lut1; - struct fixed31_32 lut2; - struct fixed31_32 delta_lut; - struct fixed31_32 delta_index; - const struct fixed31_32 one = dc_fixpt_from_int(1); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - tf_pts->red[i] = dc_fixpt_zero; - tf_pts->green[i] = dc_fixpt_zero; - tf_pts->blue[i] = dc_fixpt_zero; - ++i; - } - while (i <= hw_points_num + 1) { - for (color = 0; color < 3; color++) { - if (color == 0) - tf_point = &tf_pts->red[i]; - else if (color == 1) - tf_point = &tf_pts->green[i]; - else - tf_point = &tf_pts->blue[i]; - - if (apply_degamma) { - if (color == 0) - hw_x = coordinates_x[i].regamma_y_red; - else if (color == 1) - hw_x = coordinates_x[i].regamma_y_green; - else - hw_x = coordinates_x[i].regamma_y_blue; - } else - hw_x = coordinates_x[i].x; - - if (dc_fixpt_le(one, hw_x)) - hw_x = one; - - norm_x = dc_fixpt_mul(norm_factor, hw_x); - index = dc_fixpt_floor(norm_x); - if (index < 0 || index > 255) - continue; - - index_f = dc_fixpt_from_int(index); - index_next = (index == 255) ? 
index : index + 1; - - if (color == 0) { - lut1 = rgb_user[index].r; - lut2 = rgb_user[index_next].r; - } else if (color == 1) { - lut1 = rgb_user[index].g; - lut2 = rgb_user[index_next].g; - } else { - lut1 = rgb_user[index].b; - lut2 = rgb_user[index_next].b; - } - - // we have everything now, so interpolate - delta_lut = dc_fixpt_sub(lut2, lut1); - delta_index = dc_fixpt_sub(norm_x, index_f); - - *tf_point = dc_fixpt_add(lut1, - dc_fixpt_mul(delta_index, delta_lut)); - } - ++i; - } -} - static void build_new_custom_resulted_curve( uint32_t hw_points_num, struct dc_transfer_func_distributed_points *tf_pts) @@ -1784,29 +1619,6 @@ static void build_new_custom_resulted_curve( } } -static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma, - uint32_t hw_points_num, struct calculate_buffer *cal_buffer) -{ - uint32_t i; - - struct gamma_coefficients coeff; - struct pwl_float_data_ex *rgb = rgb_regamma; - const struct hw_x_point *coord_x = coordinates_x; - - build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); - - i = 0; - while (i != hw_points_num + 1) { - rgb->r = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - rgb->g = rgb->r; - rgb->b = rgb->r; - ++coord_x; - ++rgb; - ++i; - } -} - static bool map_regamma_hw_to_x_user( const struct dc_gamma *ramp, struct pixel_gamma_point *coeff128, @@ -1855,125 +1667,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct gamma_coefficients coeff; - const struct hw_x_point *coord_x = coordinates_x; - uint32_t i = 0; - - do { - coeff.a0[i] = dc_fixpt_from_fraction( - regamma->coeff.A0[i], 10000000); - coeff.a1[i] = dc_fixpt_from_fraction( - regamma->coeff.A1[i], 1000); - coeff.a2[i] = dc_fixpt_from_fraction( - regamma->coeff.A2[i], 1000); - coeff.a3[i] = dc_fixpt_from_fraction( - regamma->coeff.A3[i], 1000); - coeff.user_gamma[i] = dc_fixpt_from_fraction( - regamma->coeff.gamma[i], 1000); - - ++i; - } while (i != 3); - - i = 0; - /* fixed_pt library has problems handling too small values */ - while (i != 32) { - output_tf->tf_pts.red[i] = dc_fixpt_zero; - output_tf->tf_pts.green[i] = dc_fixpt_zero; - output_tf->tf_pts.blue[i] = dc_fixpt_zero; - ++coord_x; - ++i; - } - while (i != MAX_HW_POINTS + 1) { - output_tf->tf_pts.red[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 0, cal_buffer); - output_tf->tf_pts.green[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 1, cal_buffer); - output_tf->tf_pts.blue[i] = translate_from_linear_space_ex( - coord_x->x, &coeff, 2, cal_buffer); - ++coord_x; - ++i; - } - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts); - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - return true; -} - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - bool ret = false; - - if (regamma == NULL) - return false; - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - rgb_user = 
kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers); - - if (regamma->flags.bits.applyDegamma == 1) { - apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer); - copy_rgb_regamma_to_coordinates_x(coordinates_x, - MAX_HW_POINTS, rgb_regamma); - } - - interpolate_user_regamma(MAX_HW_POINTS, rgb_user, - regamma->flags.bits.applyDegamma, tf_pts); - - // no custom HDR curves! - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts); - - // this function just clamps output to 0-1 - build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts); - - ret = true; - - kfree(rgb_regamma); -rgb_regamma_alloc_fail: - kfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *input_tf, const struct dc_gamma *ramp, bool map_user_ramp) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index ee5c466613de..97e55278940e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -115,15 +115,4 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps, struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - -bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, - const struct regamma_lut *regamma, - struct calculate_buffer *cal_buffer, - const struct dc_gamma *ramp); - - #endif /* COLOR_MOD_COLOR_GAMMA_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index bbd259cea4f4..f980a84dceef 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -48,6 +48,7 @@ #define VSYNCS_BETWEEN_FLIP_THRESHOLD 2 #define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5 #define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500 +#define MICRO_HZ_TO_HZ(x) ((x) / 1000000) struct core_freesync { struct mod_freesync public; @@ -128,13 +129,26 @@ unsigned int mod_freesync_calc_v_total_from_refresh( unsigned int v_total; unsigned int frame_duration_in_ns; + if (refresh_in_uhz == 0) + return stream->timing.v_total; + frame_duration_in_ns = ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - v_total = div64_u64(div64_u64(((unsigned long long)( - frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total) + 500000, 1000000); + if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + /* When the target refresh rate is the minimum panel refresh rate, + * round down the vtotal value to avoid stretching vblank over + * panel's vtotal boundary.
+ */ + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total), 1000000); + } else { + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total) + 500000, 1000000); + } /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index c996365e84b0..1d41dd58f6bc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -27,6 +27,11 @@ #include "hdcp.h" +static inline uint16_t get_hdmi_rxstatus_msg_size(const uint8_t rxstatus[2]) +{ + return HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8 | rxstatus[0]; +} + static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) { uint8_t is_ready = 0; @@ -35,8 +40,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; else is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) && - (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus) != 0) ? 1 : 0; return is_ready ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; } @@ -84,15 +88,13 @@ static inline enum mod_hdcp_status check_link_integrity_failure_dp( static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint16_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_SUCCESS; } else { status = mod_hdcp_read_rxstatus(hdcp); if (status == MOD_HDCP_STATUS_SUCCESS) { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; @@ -104,7 +106,6 @@ static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -115,8 +116,7 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; @@ -128,7 +128,6 @@ out: static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) @@ -139,8 +138,7 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; } else { - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; @@ -152,7 +150,6 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; - uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; uint16_t wait_time = max_wait / num_polls; @@ -167,8 +164,7 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) if (status != MOD_HDCP_STATUS_SUCCESS) break; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; @@ -181,7 +177,6 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; - uint8_t size; if (is_dp_hdcp(hdcp)) { status = MOD_HDCP_STATUS_INVALID_OPERATION; @@ -189,8 +184,7 @@ static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) status = mod_hdcp_read_rxstatus(hdcp); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; @@ -249,8 +243,7 @@ static uint8_t process_rxstatus(struct mod_hdcp *hdcp, sizeof(hdcp->auth.msg.hdcp2.rx_id_list); else hdcp->auth.msg.hdcp2.rx_id_list_size = - HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | - hdcp->auth.msg.hdcp2.rxstatus[0]; + get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus); } out: return (*status == MOD_HDCP_STATUS_SUCCESS); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 3f91926a50e9..7eefcb0f5070 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -28,6 +28,8 @@ #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ +struct amdgpu_ip_block; + /* * Chip flags @@ -337,6 +339,11 @@ enum DC_DEBUG_MASK { * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. */ DC_FORCE_IPS_ENABLE = 0x4000, + /** + * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for + * eDP display from ACPI _DDC method. 
+ */ + DC_DISABLE_ACPI_EDID = 0x8000, }; enum amd_dpm_forced_level; @@ -377,30 +384,30 @@ enum amd_dpm_forced_level; */ struct amd_ip_funcs { char *name; - int (*early_init)(void *handle); - int (*late_init)(void *handle); - int (*sw_init)(void *handle); - int (*sw_fini)(void *handle); - int (*early_fini)(void *handle); - int (*hw_init)(void *handle); - int (*hw_fini)(void *handle); - void (*late_fini)(void *handle); - int (*prepare_suspend)(void *handle); - int (*suspend)(void *handle); - int (*resume)(void *handle); + int (*early_init)(struct amdgpu_ip_block *ip_block); + int (*late_init)(struct amdgpu_ip_block *ip_block); + int (*sw_init)(struct amdgpu_ip_block *ip_block); + int (*sw_fini)(struct amdgpu_ip_block *ip_block); + int (*early_fini)(struct amdgpu_ip_block *ip_block); + int (*hw_init)(struct amdgpu_ip_block *ip_block); + int (*hw_fini)(struct amdgpu_ip_block *ip_block); + void (*late_fini)(struct amdgpu_ip_block *ip_block); + int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); + int (*suspend)(struct amdgpu_ip_block *ip_block); + int (*resume)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(void *handle); - int (*wait_for_idle)(void *handle); - bool (*check_soft_reset)(void *handle); - int (*pre_soft_reset)(void *handle); - int (*soft_reset)(void *handle); - int (*post_soft_reset)(void *handle); + int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); + bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block); + int (*soft_reset)(struct amdgpu_ip_block *ip_block); + int (*post_soft_reset)(struct amdgpu_ip_block *ip_block); int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); int (*set_powergating_state)(void *handle, enum amd_powergating_state state); void (*get_clockgating_state)(void *handle, u64 *flags); - void (*dump_ip_state)(void *handle); - void (*print_ip_state)(void *handle, struct drm_printer *p); + void (*dump_ip_state)(struct amdgpu_ip_block *ip_block); + void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p); }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h index f42a276499cd..5d9d5fea6e06 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h @@ -6199,10 +6199,12 @@ #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS__SHIFT 0x1 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2 #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3 +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG__SHIFT 0x4 #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f #define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS_MASK 0x00000002L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L #define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L +#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG_MASK 0x3FFFFFF0L #define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L //DCHUBBUB_TIMEOUT_DETECTION_CTRL1 #define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h index 2c3ce243861a..380e44230bda 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h @@ -1232,6 +1232,29 @@ #define 
mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d #define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0 +// Stand Alone Walker Registers +#define VMC_TAP_PDE_REQUEST_SNOOP_OFFSET 8 +#define VMC_TAP_PTE_REQUEST_SNOOP_OFFSET 11 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0606 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0607 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0608 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0609 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x060a +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x060b +#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 +#define mmVM_L2_SAW_CONTEXT0_CNTL 0x0604 +#define mmVM_L2_SAW_CONTEXT0_CNTL_BASE_IDX 0 +#define CONTEXT0_CNTL_ENABLE_OFFSET 0 +#define CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET 1 +#define mmVM_L2_SAW_CONTEXTS_DISABLE 0x060c +#define mmVM_L2_SAW_CONTEXTS_DISABLE_BASE_IDX 0 +#define mmVM_L2_SAW_CNTL4 0x0603 +#define mmVM_L2_SAW_CNTL4_BASE_IDX 0 // addressBlock: mmhub_utcl2_atcl2dec // base address: 0x69900 diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 19a48d98830a..bb27c0d2a9ae 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -119,6 +119,8 @@ enum pp_clock_type { OD_ACOUSTIC_TARGET, OD_FAN_TARGET_TEMPERATURE, OD_FAN_MINIMUM_PWM, + OD_FAN_ZERO_RPM_ENABLE, + OD_FAN_ZERO_RPM_STOP_TEMP, }; enum amd_pp_sensors { @@ -199,6 +201,8 @@ enum PP_OD_DPM_TABLE_COMMAND { PP_OD_EDIT_ACOUSTIC_TARGET, PP_OD_EDIT_FAN_TARGET_TEMPERATURE, PP_OD_EDIT_FAN_MINIMUM_PWM, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, }; struct pp_states_info { @@ -336,7 +340,8 @@ enum pp_policy_soc_pstate { #define MAX_CLKS 4 #define NUM_VCN 4 #define NUM_JPEG_ENG 32 - +#define MAX_XCC 8 +#define NUM_XCP 8 struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -350,6 +355,15 @@ struct pp_smu_wm_range_sets; struct pp_smu_nv_clock_table; struct dpm_clocks; +struct amdgpu_xcp_metrics { + /* Utilization Instantaneous (%) */ + u32 gfx_busy_inst[MAX_XCC]; + u16 jpeg_busy[NUM_JPEG_ENG]; + u16 vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + u64 gfx_busy_acc[MAX_XCC]; +}; + struct amd_pm_funcs { /* export for dpm on ci and si */ int (*pre_set_power_state)(void *handle); @@ -872,6 +886,97 @@ struct gpu_metrics_v1_5 { uint16_t padding; }; +struct gpu_metrics_v1_6 { + struct metrics_table_header common_header; + + /* Temperature (Celsius) */ + uint16_t temperature_hotspot; + uint16_t temperature_mem; + uint16_t temperature_vrsoc; + + /* Power (Watts) */ + uint16_t curr_socket_power; + + /* Utilization (%) */ + uint16_t average_gfx_activity; + uint16_t average_umc_activity; // memory controller + + /* Energy (15.259uJ (2^-16) units) */ + uint64_t energy_accumulator; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Accumulation cycle counter */ + uint32_t accumulation_counter; + + /* Accumulated throttler residencies */ + uint32_t prochot_residency_acc; + uint32_t ppt_residency_acc; + uint32_t socket_thm_residency_acc; + uint32_t vr_thm_residency_acc; + 
uint32_t hbm_thm_residency_acc; + + /* Clock Lock Status. Each bit corresponds to clock instance */ + uint32_t gfxclk_lock_status; + + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + + /* XGMI bus width and bitrate (in Gbps) */ + uint16_t xgmi_link_width; + uint16_t xgmi_link_speed; + + /* Utilization Accumulated (%) */ + uint32_t gfx_activity_acc; + uint32_t mem_activity_acc; + + /* PCIE accumulated bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_acc; + + /* PCIE instantaneous bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_inst; + + /* PCIE L0 to recovery state transition accumulated count */ + uint64_t pcie_l0_to_recov_count_acc; + + /* PCIE replay accumulated count */ + uint64_t pcie_replay_count_acc; + + /* PCIE replay rollover accumulated count */ + uint64_t pcie_replay_rover_count_acc; + + /* PCIE NAK sent accumulated count */ + uint32_t pcie_nak_sent_count_acc; + + /* PCIE NAK received accumulated count */ + uint32_t pcie_nak_rcvd_count_acc; + + /* XGMI accumulated data transfer size (KiloBytes) */ + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; + + /* PMFW attached timestamp (10ns resolution) */ + uint64_t firmware_timestamp; + + /* Current clocks (MHz) */ + uint16_t current_gfxclk[MAX_GFX_CLKS]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + + /* Number of current partition */ + uint16_t num_partition; + + /* XCP metrics stats */ + struct amdgpu_xcp_metrics xcp_stats[NUM_XCP]; + + /* PCIE other end recovery counter */ + uint32_t pcie_lc_perf_other_end_recovery; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead.
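For context on the gpu_metrics_v1_6 table added above: every layout in the gpu_metrics_v1_x/v2_x family begins with the same metrics_table_header (structure_size, format_revision, content_revision), and consumers are expected to dispatch on that header before casting the raw buffer to a particular version. Below is a minimal C sketch of that dispatch against the definitions in this header, assuming v1_6 is published with format revision 1 and content revision 6 as is conventional for the v1_x family; the helper name and the length check are illustrative, not part of this patch:

static const struct gpu_metrics_v1_6 *
as_gpu_metrics_v1_6(const void *table, size_t len)
{
	/* Every gpu_metrics_v* layout starts with metrics_table_header. */
	const struct metrics_table_header *hdr = table;

	if (len < sizeof(*hdr) || len < hdr->structure_size)
		return NULL; /* truncated or inconsistent buffer */
	if (hdr->format_revision != 1 || hdr->content_revision != 6)
		return NULL; /* some other gpu_metrics revision */
	return table; /* safe to read the v1_6 fields */
}

Checking structure_size as well as the revision pair guards against reading past the end of a shorter table handed back by older firmware; a caller that only needs older fields can fall back to the matching lower-revision struct instead of failing outright.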
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 21ceafce1f9b..eb46cb10c24d 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -230,13 +230,23 @@ union MESAPI_SET_HW_RESOURCES { uint32_t disable_add_queue_wptr_mc_addr : 1; uint32_t enable_mes_event_int_logging : 1; uint32_t enable_reg_active_poll : 1; - uint32_t reserved : 21; + uint32_t use_disable_queue_in_legacy_uq_preemption : 1; + uint32_t send_write_data : 1; + uint32_t os_tdr_timeout_override : 1; + uint32_t use_rs64mem_for_proc_gang_ctx : 1; + uint32_t use_add_queue_unmap_flag_addr : 1; + uint32_t enable_mes_sch_stb_log : 1; + uint32_t limit_single_process : 1; + uint32_t is_strix_tmz_wa_enabled : 1; + uint32_t reserved : 13; }; uint32_t uint32_t_all; }; uint32_t oversubscription_timer; uint64_t doorbell_info; uint64_t event_intr_history_gpu_mc_ptr; + uint64_t timestamp; + uint32_t os_tdr_timeout_in_sec; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -563,6 +573,11 @@ enum MESAPI_MISC_OPCODE { MESAPI_MISC__READ_REG, MESAPI_MISC__WAIT_REG_MEM, MESAPI_MISC__SET_SHADER_DEBUGGER, + MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE, + MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES, + MESAPI_MISC__CHANGE_CONFIG, + MESAPI_MISC__LAUNCH_CLEANER_SHADER, + MESAPI_MISC__MAX, }; @@ -617,6 +632,31 @@ struct SET_SHADER_DEBUGGER { uint32_t trap_en; }; +enum MESAPI_MISC__CHANGE_CONFIG_OPTION { + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0, + MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1, + MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2, + + MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F +}; + +struct CHANGE_CONFIG { + enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode; + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + } bits; + uint32_t all; + } option; + + struct { + uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -631,6 +671,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index 101e2fe962c6..c9b2ca5cf75f 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -643,6 +643,10 @@ enum MESAPI_MISC_OPCODE { MESAPI_MISC__SET_SHADER_DEBUGGER, MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE, MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES, + MESAPI_MISC__QUERY_HUNG_ENGINE_ID, + MESAPI_MISC__CHANGE_CONFIG, + MESAPI_MISC__LAUNCH_CLEANER_SHADER, + MESAPI_MISC__SETUP_MES_DBGEXT, MESAPI_MISC__MAX, }; @@ -713,6 +717,31 @@ struct SET_GANG_SUBMIT { uint32_t slave_gang_context_array_index; }; +enum MESAPI_MISC__CHANGE_CONFIG_OPTION { + MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0, + MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1, + MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2, + + MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F +}; + +struct CHANGE_CONFIG { + enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode; + union { + struct { + uint32_t limit_single_process : 1; + uint32_t enable_hws_logging_buffer : 1; + uint32_t reserved : 30; + } bits; + uint32_t all; + } option; + + struct {
uint32_t tdr_level; + uint32_t tdr_delay; + } tdr_config; +}; + union MESAPI__MISC { struct { union MES_API_HEADER header; @@ -726,7 +755,7 @@ union MESAPI__MISC { struct WAIT_REG_MEM wait_reg_mem; struct SET_SHADER_DEBUGGER set_shader_debugger; enum MES_AMD_PRIORITY_LEVEL queue_sch_level; - + struct CHANGE_CONFIG change_config; uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; }; uint64_t timestamp; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index d5d6ab484e5a..136e8193867c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -145,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -185,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_power_state(adev, state); @@ -273,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; level = amdgpu_dpm_get_performance_level(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%s\n", @@ -336,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { @@ -374,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); buf_len = sysfs_emit(buf, "states: %d\n", data.nums); @@ -412,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_dpm_get_current_power_state(adev, &pm); ret = amdgpu_dpm_get_pp_num_states(adev, &data); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -485,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - ret = 
pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_get_pp_num_states(adev, &data); if (ret) @@ -544,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pp_table(adev, &table); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (size <= 0) @@ -580,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pp_table(adev, buf, count); @@ -808,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } if (amdgpu_dpm_set_fine_grain_clk_vol(adev, type, @@ -865,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; for (clk_index = 0 ; clk_index < 6 ; clk_index++) { ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size); @@ -888,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -929,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); @@ -960,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_ppfeature_status(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1029,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); if (ret == -ENOENT) @@ -1042,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); 
return size; @@ -1102,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, if (ret) return ret; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_force_clock_level(adev, type, mask); @@ -1283,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; value = amdgpu_dpm_get_sclk_od(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%d\n", value); @@ -1317,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); @@ -1345,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; value = amdgpu_dpm_get_mclk_od(adev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%d\n", value); @@ -1379,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, if (ret) return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); @@ -1427,17 +1379,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_power_profile_mode(adev, buf); if (size <= 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1492,11 +1441,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); @@ -1520,16 +1467,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; /* get the sensor value */ r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1639,15 +1583,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, if 
(!adev->asic_funcs->get_pcie_usage) return -ENODATA; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; amdgpu_asic_get_pcie_usage(adev, &count0, &count1); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return sysfs_emit(buf, "%llu %llu %i\n", @@ -1770,11 +1711,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit); if (!ret) @@ -1782,7 +1721,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, else size = sysfs_emit(buf, "failed to get thermal limit\n"); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1807,14 +1745,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return -EINVAL; } - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); dev_err(dev, "failed to update thermal limit\n"); return ret; } @@ -1849,15 +1787,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -1890,11 +1825,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + ret = pm_runtime_get_if_active(ddev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); if (size <= 0) @@ -1906,7 +1839,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, memcpy(buf, gpu_metrics, size); out: - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return size; @@ -2008,11 +1940,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(ddev->dev); + r = pm_runtime_resume_and_get(ddev->dev); + if (r < 0) return r; - } r = kstrtoint(buf, 10, &bias); if (r) @@ -2335,11 +2265,9 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, policy_attr = container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); + ret = pm_runtime_resume_and_get(ddev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val); @@ -2772,15 +2700,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, if (adev->in_suspend && 
!adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -2817,11 +2742,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, else return -EINVAL; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + ret = pm_runtime_resume_and_get(adev->dev); + if (ret < 0) return ret; - } ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -2866,11 +2789,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -2907,15 +2828,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -2937,15 +2855,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3001,15 +2916,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } + err = pm_runtime_get_if_active(adev->dev); + if (err <= 0) + return err ?: -EPERM; err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) @@ -3036,11 +2948,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, if (err) return err; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); if (err) @@ -3076,15 +2986,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; ret = amdgpu_dpm_get_fan_control_mode(adev, 
&pwm_mode); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) @@ -3119,11 +3026,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); @@ -3248,11 +3153,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + r = pm_runtime_get_if_active(adev->dev); + if (r <= 0) + return r ?: -EPERM; r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); @@ -3262,7 +3165,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, else size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return size; @@ -3339,11 +3241,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ value |= limit_type << 24; - err = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (err < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + err = pm_runtime_resume_and_get(adev->dev); + if (err < 0) return err; - } err = amdgpu_dpm_set_power_limit(adev, value); @@ -3787,17 +3687,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - ret = pm_runtime_get_sync(adev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(adev->dev); - return ret; - } + ret = pm_runtime_get_if_active(adev->dev); + if (ret <= 0) + return ret ?: -EPERM; size = amdgpu_dpm_print_clock_levels(adev, od_type, buf); if (size == 0) size = sysfs_emit(buf, "\n"); - pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); return size; @@ -3879,23 +3776,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, if (ret) return ret; - ret = pm_runtime_get_sync(adev->dev); + ret = pm_runtime_resume_and_get(adev->dev); if (ret < 0) - goto err_out0; + return ret; ret = amdgpu_dpm_odn_edit_dpm_table(adev, cmd_type, parameter, parameter_size); if (ret) - goto err_out1; + goto err_out; if (cmd_type == PP_OD_COMMIT_DPM_TABLE) { ret = amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); if (ret) - goto err_out1; + goto err_out; } pm_runtime_mark_last_busy(adev->dev); @@ -3903,9 +3800,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, return count; -err_out1: +err_out: pm_runtime_mark_last_busy(adev->dev); -err_out0: pm_runtime_put_autosuspend(adev->dev); return ret; @@ -4213,6 +4109,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev) return umode; } +/** + * DOC: fan_zero_rpm_enable + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable. + * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset them.
+ */ +static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf); +} + +static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, + buf, + count); +} + +static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET) + umode |= S_IWUSR; + + return umode; +} + +/** + * DOC: fan_zero_rpm_stop_temperature + * + * The amdgpu driver provides a sysfs API for checking and adjusting the + * zero RPM stop temperature feature. + * + * Reading back the file shows you the current setting and the permitted + * ranges if changeable. + * + * Writing an integer to the file changes the setting accordingly. + * + * When you have finished editing, write "c" (commit) to the file to commit + * your changes. + * + * If you want to reset to the default value, write "r" (reset) to the file to + * reset them. + * + * This setting works only if the Zero RPM setting is enabled. It adjusts the + * temperature below which the fan can stop. + */ +static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf); +} + +static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); + struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; + + return (ssize_t)amdgpu_distribute_custom_od_settings(adev, + PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, + buf, + count); +} + +static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev) +{ + umode_t umode = 0000; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE) + umode |= S_IRUSR | S_IRGRP | S_IROTH; + + if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET) + umode |= S_IWUSR; + + return umode; +} + static struct od_feature_set amdgpu_od_set = { .containers = { [0] = { @@ -4258,6 +4265,22 @@ static struct od_feature_set amdgpu_od_set = { .store = fan_minimum_pwm_store, }, }, + [5] = { + .name = "fan_zero_rpm_enable", + .ops = { + .is_visible = fan_zero_rpm_enable_visible, + .show = fan_zero_rpm_enable_show, + .store = fan_zero_rpm_enable_store, + }, + }, + [6] = { + .name = "fan_zero_rpm_stop_temperature", + .ops = { + .is_visible = fan_zero_rpm_stop_temp_visible, + .show = fan_zero_rpm_stop_temp_show, + .store = fan_zero_rpm_stop_temp_store, + }, + }, }, }, }, @@ -4758,11 +4781,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) if (adev->in_suspend &&
!adev->in_runpm) return -EPERM; - r = pm_runtime_get_sync(dev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(dev->dev); + r = pm_runtime_resume_and_get(dev->dev); + if (r < 0) return r; - } if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { r = amdgpu_debugfs_pm_info_pp(m, adev); @@ -4777,7 +4798,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) seq_printf(m, "\n"); out: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index f5bf41f21c41..363af8990aa2 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -328,6 +328,10 @@ struct config_table_setting #define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8) #define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE BIT(10) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET BIT(11) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE BIT(12) +#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET BIT(13) struct amdgpu_pm { struct mutex mutex; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index e8b6989a40f3..8908646ad620 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -2954,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle) return actual_temp; } -static int kv_dpm_early_init(void *handle) +static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &kv_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -2965,10 +2965,10 @@ static int kv_dpm_early_init(void *handle) return 0; } -static int kv_dpm_late_init(void *handle) +static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block) { /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -2979,11 +2979,10 @@ static int kv_dpm_late_init(void *handle) return 0; } -static int kv_dpm_sw_init(void *handle) +static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -3024,9 +3023,9 @@ dpm_failed: return ret; } -static int kv_dpm_sw_fini(void *handle) +static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -3035,10 +3034,10 @@ static int kv_dpm_sw_fini(void *handle) return 0; } -static int kv_dpm_hw_init(void *handle) +static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -3053,9 +3052,9 @@ static int kv_dpm_hw_init(void *handle) return ret; } -static int kv_dpm_hw_fini(void *handle) +static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if 
(adev->pm.dpm_enabled) kv_dpm_disable(adev); @@ -3063,9 +3062,9 @@ static int kv_dpm_hw_fini(void *handle) return 0; } -static int kv_dpm_suspend(void *handle) +static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -3076,10 +3075,10 @@ static int kv_dpm_suspend(void *handle) return 0; } -static int kv_dpm_resume(void *handle) +static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -3100,17 +3099,6 @@ static bool kv_dpm_is_idle(void *handle) return true; } -static int kv_dpm_wait_for_idle(void *handle) -{ - return 0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -3314,12 +3302,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .suspend = kv_dpm_suspend, .resume = kv_dpm_resume, .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version kv_smu_ip_block = { diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a1baa13ab2c2..ee23a0f897c5 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -4755,13 +4755,15 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, u32 dram_timing; u32 dram_timing2; u32 burst_time; + int ret; arb_regs->mc_arb_rfsh_rate = (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); - amdgpu_atombios_set_engine_dram_timings(adev, - pl->sclk, - pl->mclk); + ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk, + pl->mclk); + if (ret) + return ret; dram_timing = RREG32(MC_ARB_DRAM_TIMING); dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); @@ -7619,10 +7621,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int si_dpm_late_init(void *handle) +static int si_dpm_late_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!adev->pm.dpm_enabled) return 0; @@ -7716,10 +7718,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) return err; } -static int si_dpm_sw_init(void *handle) +static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -7763,9 +7765,9 @@ dpm_failed: return ret; } -static int si_dpm_sw_fini(void *handle) +static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; flush_work(&adev->pm.dpm.thermal.work); @@ -7774,11 +7776,11 @@ static int si_dpm_sw_fini(void *handle) return 0; } -static int si_dpm_hw_init(void *handle) +static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (!amdgpu_dpm) return 0; @@ -7793,9 +7795,9 @@ static int si_dpm_hw_init(void *handle) return ret; } -static int si_dpm_hw_fini(void *handle) +static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) si_dpm_disable(adev); @@ -7803,9 +7805,9 @@ static int si_dpm_hw_fini(void *handle) return 0; } -static int si_dpm_suspend(void *handle) +static int si_dpm_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* disable dpm */ @@ -7816,10 +7818,10 @@ static int si_dpm_suspend(void *handle) return 0; } -static int si_dpm_resume(void *handle) +static int si_dpm_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -7841,17 +7843,12 @@ static bool si_dpm_is_idle(void *handle) return true; } -static int si_dpm_wait_for_idle(void *handle) +static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX */ return 0; } -static int si_dpm_soft_reset(void *handle) -{ - return 0; -} - static int si_dpm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -7928,10 +7925,10 @@ static void si_dpm_print_power_state(void *handle, amdgpu_dpm_print_ps_status(adev, rps); } -static int si_dpm_early_init(void *handle) +static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; adev->powerplay.pp_funcs = &si_dpm_funcs; adev->powerplay.pp_handle = adev; @@ -8047,11 +8044,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = { .resume = si_dpm_resume, .is_idle = si_dpm_is_idle, .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version si_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index a71c6117d7e5..26624a716fc6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -75,11 +75,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev) hwmgr = NULL; } -static int pp_early_init(void *handle) +static int pp_early_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = handle; - + struct amdgpu_device *adev = ip_block->adev; ret = amd_powerplay_create(adev); if (ret != 0) @@ -131,9 +130,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work) orderly_poweroff(true); } -static int pp_sw_init(void *handle) +static int pp_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; @@ -148,9 +147,9 @@ static int pp_sw_init(void *handle) return ret; } -static int pp_sw_fini(void *handle) +static int pp_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + 
struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; hwmgr_sw_fini(hwmgr); @@ -160,10 +159,10 @@ static int pp_sw_fini(void *handle) return 0; } -static int pp_hw_init(void *handle) +static int pp_hw_init(struct amdgpu_ip_block *ip_block) { int ret = 0; - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; ret = hwmgr_hw_init(hwmgr); @@ -174,10 +173,9 @@ static int pp_hw_init(void *handle) return ret; } -static int pp_hw_fini(void *handle) +static int pp_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -217,9 +215,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) } } -static int pp_late_init(void *handle) +static int pp_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; if (hwmgr && hwmgr->pm_en) @@ -231,9 +229,9 @@ static int pp_late_init(void *handle) return 0; } -static void pp_late_fini(void *handle) +static void pp_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; if (adev->pm.smu_prv_buffer) amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); @@ -246,25 +244,15 @@ static bool pp_is_idle(void *handle) return false; } -static int pp_wait_for_idle(void *handle) -{ - return 0; -} - -static int pp_sw_reset(void *handle) -{ - return 0; -} - static int pp_set_powergating_state(void *handle, enum amd_powergating_state state) { return 0; } -static int pp_suspend(void *handle) +static int pp_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; cancel_delayed_work_sync(&hwmgr->swctf_delayed_work); @@ -272,10 +260,9 @@ static int pp_suspend(void *handle) return hwmgr_suspend(hwmgr); } -static int pp_resume(void *handle) +static int pp_resume(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle; return hwmgr_resume(hwmgr); } @@ -298,12 +285,8 @@ static const struct amd_ip_funcs pp_ip_funcs = { .suspend = pp_suspend, .resume = pp_resume, .is_idle = pp_is_idle, - .wait_for_idle = pp_wait_for_idle, - .soft_reset = pp_sw_reset, .set_clockgating_state = pp_set_clockgating_state, .set_powergating_state = pp_set_powergating_state, - .dump_ip_state = NULL, - .print_ip_state = NULL, }; const struct amdgpu_ip_block_version pp_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index b56298d9da98..fe24219c3bf4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -28,7 +28,6 @@ #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" -#include "ppevvmath.h" #define MEM_ID_MASK 0xff000000 #define MEM_ID_SHIFT 24 @@ -677,433 +676,6 @@ bool atomctrl_get_pp_assign_pin( return bRet; } -int atomctrl_calculate_voltage_evv_on_sclk( - struct pp_hwmgr *hwmgr, - uint8_t voltage_type, - uint32_t sclk, - uint16_t 
virtual_voltage_Id, - uint16_t *voltage, - uint16_t dpm_level, - bool debug) -{ - ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - struct amdgpu_device *adev = hwmgr->adev; - EFUSE_LINEAR_FUNC_PARAM sRO_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; - EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; - EFUSE_INPUT_PARAMETER sInput_FuseValues; - READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; - - uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; - fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; - fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; - fInt fLkg_FT, repeat; - fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; - fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; - fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; - fInt fSclk_margin, fSclk, fEVV_V; - fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; - uint32_t ul_FT_Lkg_V0NORM; - fInt fLn_MaxDivMin, fMin, fAverage, fRange; - fInt fRoots[2]; - fInt fStepSize = GetScaledFraction(625, 100000); - - int result; - - getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - smu_atom_get_data_table(hwmgr->adev, - GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), - NULL, NULL, NULL); - - if (!getASICProfilingInfo) - return -1; - - if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) - return -1; - - /*----------------------------------------------------------- - *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL - *----------------------------------------------------------- - */ - fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); - - switch (dpm_level) { - case 1: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); - break; - case 2: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); - break; - case 3: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); - break; - case 4: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); - break; - case 5: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); - break; - case 6: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); - break; - case 7: - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); - break; - default: - pr_err("DPM Level not supported\n"); - fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); - } - - /*------------------------- - * DECODING FUSE VALUES - * ------------------------ - */ - /*Decode RO_Fused*/ - sRO_fuse = getASICProfilingInfo->sRoFuse; - - sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = 
amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - /* Finally, the actual fuse value */ - ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); - fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); - fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); - - sCACm_fuse = getASICProfilingInfo->sCACm; - - sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); - - fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); - - sCACb_fuse = getASICProfilingInfo->sCACb; - - sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); - fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); - - fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); - - sKt_Beta_fuse = getASICProfilingInfo->sKt_b; - - sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); - - fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, - fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); - - sKv_m_fuse = getASICProfilingInfo->sKv_m; - - sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - if (result) - return result; - - ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 
- fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); - fRange = fMultiply(fRange, ConvertToFraction(-1)); - - fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, - fAverage, fRange, sKv_m_fuse.ucEfuseLength); - - sKv_b_fuse = getASICProfilingInfo->sKv_b; - - sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; - sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; - sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); - fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); - - fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, - fAverage, fRange, sKv_b_fuse.ucEfuseLength); - - /* Decoding the Leakage - No special struct container */ - /* - * usLkgEuseIndex=56 - * ucLkgEfuseBitLSB=6 - * ucLkgEfuseLength=10 - * ulLkgEncodeLn_MaxDivMin=69077 - * ulLkgEncodeMax=1000000 - * ulLkgEncodeMin=1000 - * ulEfuseLogisticAlpha=13 - */ - - sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; - sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; - sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; - - sOutput_FuseValues.sEfuse = sInput_FuseValues; - - result = amdgpu_atom_execute_table(adev->mode_info.atom_context, - GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); - - if (result) - return result; - - ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); - fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); - fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); - - fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, - fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); - fLkg_FT = fFT_Lkg_V0NORM; - - /*------------------------------------------- - * PART 2 - Grabbing all required values - *------------------------------------------- - */ - fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); - fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); - fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); - fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); - fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); - fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); - fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), - ConvertToFraction(uPow(-1, 
getASICProfilingInfo->ucSM_A6_sign))); - fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), - ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); - - fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); - fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); - fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); - - fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); - - fMargin_FMAX_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); - fMargin_Plat_mean = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); - fMargin_FMAX_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); - fMargin_Plat_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); - - fMargin_DC_sigma = GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); - fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); - - fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); - fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); - fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); - fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); - fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); - - fSclk = GetScaledFraction(sclk, 100); - - fV_max = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); - fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); - fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); - fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); - fV_FT = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); - fV_min = fDivide(GetScaledFraction( - le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); - - /*----------------------- - * PART 3 - *----------------------- - */ - - fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); - fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); - fC_Term = fAdd(fMargin_RO_c, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), - fAdd(fMultiply(fSM_A3, fSclk), - fSubtract(fSM_A7, fRO_fused))))); - - fVDDC_base = fSubtract(fRO_fused, - fSubtract(fMargin_RO_c, - fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); - fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); - - repeat = fSubtract(fVDDC_base, - fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); - - fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, - fGetSquare(repeat)), - fAdd(fMultiply(fMargin_RO_b, repeat), - fMargin_RO_c)); - - fDC_SCLK = fSubtract(fRO_fused, - fSubtract(fRO_DC_margin, - fSubtract(fSM_A3, - fMultiply(fSM_A2, repeat)))); - fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); - - fSigma_DC = fSubtract(fSclk, fDC_SCLK); - - fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); - fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); - fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); - fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); - - fSquared_Sigma_DC = fGetSquare(fSigma_DC); - fSquared_Sigma_CR = fGetSquare(fSigma_CR); - fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); - - 
fSclk_margin = fAdd(fMicro_FMAX, - fAdd(fMicro_CR, - fAdd(fMargin_fixed, - fSqrt(fAdd(fSquared_Sigma_FMAX, - fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); - /* - fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; - fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; - fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; - */ - - fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); - fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); - fC_Term = fAdd(fRO_DC_margin, - fAdd(fMultiply(fSM_A0, fLkg_FT), - fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), - fAdd(fSclk, fSclk_margin)), - fAdd(fMultiply(fSM_A3, - fAdd(fSclk, fSclk_margin)), - fSubtract(fSM_A7, fRO_fused))))); - - SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); - - if (GreaterThan(fRoots[0], fRoots[1])) - fEVV_V = fRoots[1]; - else - fEVV_V = fRoots[0]; - - if (GreaterThan(fV_min, fEVV_V)) - fEVV_V = fV_min; - else if (GreaterThan(fEVV_V, fV_max)) - fEVV_V = fSubtract(fV_max, fStepSize); - - fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); - - /*----------------- - * PART 4 - *----------------- - */ - - fV_x = fV_min; - - while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { - fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( - fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), - fGetSquare(fV_x)), fDerateTDP); - - fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, - fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, - fT_prod), fKv_b_fused), fV_x)), fV_x))); - fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_prod))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); - fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( - fKt_Beta_fused, fT_FT))); - - fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); - - fTDP_Current = fDivide(fTDP_Power, fV_x); - - fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), - ConvertToFraction(10))); - - fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); - - if (GreaterThan(fV_max, fV_NL) && - (GreaterThan(fV_NL, fEVV_V) || - Equal(fV_NL, fEVV_V))) { - fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); - - *voltage = (uint16_t)fV_NL.partial.real; - break; - } else - fV_x = fAdd(fV_x, fStepSize); - } - - return result; -} - /** * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. 
* @hwmgr: input: pointer to hwManager diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index 1f987e846628..22b0ac12df97 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, pp_atomctrl_clock_dividers_kong *dividers); extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t *efuse); -extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers); extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h deleted file mode 100644 index 409aeec6baa9..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h +++ /dev/null @@ -1,561 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include <asm/div64.h> - -enum ppevvmath_constants { - /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */ - SHIFT_AMOUNT = 16, - - /* Change this value to change the number of decimal places in the final output - 5 is a good default */ - PRECISION = 5, - - SHIFTED_2 = (2 << SHIFT_AMOUNT), - - /* 32767 - Might change in the future */ - MAX = (1 << (SHIFT_AMOUNT - 1)) - 1, -}; - -/* ------------------------------------------------------------------------------- - * NEW TYPE - fINT - * ------------------------------------------------------------------------------- - * A variable of type fInt can be accessed in 3 ways using the dot (.) operator - * fInt A; - * A.full => The full number as it is. 
Generally not easy to read - * A.partial.real => Only the integer portion - * A.partial.decimal => Only the fractional portion - */ -typedef union _fInt { - int full; - struct _partial { - unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/ - int real: 32 - SHIFT_AMOUNT; - } partial; -} fInt; - -/* ------------------------------------------------------------------------------- - * Function Declarations - * ------------------------------------------------------------------------------- - */ -static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ - -static fInt fNegate(fInt); /* Returns -1 * input fInt value */ -static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -static fInt fDivide (fInt A, fInt B); /* Returns A/B */ -static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -static int uAbs(int); /* Returns the Absolute value of the Int */ -static int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ - -/* Fuse decoding functions - * ------------------------------------------------------------------------------------- - */ -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); - -/* Internal Support Functions - Use these ONLY for testing or adding to internal functions - * ------------------------------------------------------------------------------------- - * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. 
- */ -static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ -static fInt fNegate(fInt); - -static int uGetScaledDecimal (fInt); /* Internal function */ -static int GetReal (fInt A); /* Internal function */ - -/* ------------------------------------------------------------------------------------- - * TROUBLESHOOTING INFORMATION - * ------------------------------------------------------------------------------------- - * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767) - * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767) - * 3) fMultiply - OutputOutOfRangeException: - * 4) fGetSquare - OutputOutOfRangeException: - * 5) fDivide - DivideByZeroException - * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number - */ - -/* ------------------------------------------------------------------------------------- - * START OF CODE - * ------------------------------------------------------------------------------------- - */ -static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ -{ - uint32_t i; - bool bNegated = false; - - fInt fPositiveOne = ConvertToFraction(1); - fInt fZERO = ConvertToFraction(0); - - fInt lower_bound = Divide(78, 10000); - fInt solution = fPositiveOne; /*Starting off with baseline of 1 */ - fInt error_term; - - static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - - if (GreaterThan(fZERO, exponent)) { - exponent = fNegate(exponent); - bNegated = true; - } - - while (GreaterThan(exponent, lower_bound)) { - for (i = 0; i < 11; i++) { - if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) { - exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000)); - solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000)); - } - } - } - - error_term = fAdd(fPositiveOne, exponent); - - solution = fMultiply(solution, error_term); - - if (bNegated) - solution = fDivide(fPositiveOne, solution); - - return solution; -} - -static fInt fNaturalLog(fInt value) -{ - uint32_t i; - fInt upper_bound = Divide(8, 1000); - fInt fNegativeOne = ConvertToFraction(-1); - fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */ - fInt error_term; - - static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078}; - static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78}; - - while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) { - for (i = 0; i < 10; i++) { - if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) { - value = fDivide(value, GetScaledFraction(k_array[i], 10000)); - solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000)); - } - } - } - - error_term = fAdd(fNegativeOne, value); - - return fAdd(solution, error_term); -} - -static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_decoded_value; - - f_decoded_value = fDivide(f_fuse_value, f_bit_max_value); - f_decoded_value = fMultiply(f_decoded_value, f_range); - f_decoded_value = fAdd(f_decoded_value, f_min); - - return f_decoded_value; -} - - -static fInt 
fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) -{ - fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fInt f_CONSTANT_NEG13 = ConvertToFraction(-13); - fInt f_CONSTANT1 = ConvertToFraction(1); - - fInt f_decoded_value; - - f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1); - f_decoded_value = fNaturalLog(f_decoded_value); - f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13)); - f_decoded_value = fAdd(f_decoded_value, f_average); - - return f_decoded_value; -} - -static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) -{ - fInt fLeakage; - fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); - - fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse)); - fLeakage = fDivide(fLeakage, f_bit_max_value); - fLeakage = fExponential(fLeakage); - fLeakage = fMultiply(fLeakage, f_min); - - return fLeakage; -} - -static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt fNegate(fInt X) -{ - fInt CONSTANT_NEGONE = ConvertToFraction(-1); - return fMultiply(X, CONSTANT_NEGONE); -} - -static fInt Convert_ULONG_ToFraction(uint32_t X) -{ - fInt temp; - - if (X <= MAX) - temp.full = (X << SHIFT_AMOUNT); - else - temp.full = 0; - - return temp; -} - -static fInt GetScaledFraction(int X, int factor) -{ - int times_shifted, factor_shifted; - bool bNEGATED; - fInt fValue; - - times_shifted = 0; - factor_shifted = 0; - bNEGATED = false; - - if (X < 0) { - X = -1*X; - bNEGATED = true; - } - - if (factor < 0) { - factor = -1*factor; - bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */ - } - - if ((X > MAX) || factor > MAX) { - if ((X/factor) <= MAX) { - while (X > MAX) { - X = X >> 1; - times_shifted++; - } - - while (factor > MAX) { - factor = factor >> 1; - factor_shifted++; - } - } else { - fValue.full = 0; - return fValue; - } - } - - if (factor == 1) - return ConvertToFraction(X); - - fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor)); - - fValue.full = fValue.full << times_shifted; - fValue.full = fValue.full >> factor_shifted; - - return fValue; -} - -/* Addition using two fInts */ -static fInt fAdd (fInt X, fInt Y) -{ - fInt Sum; - - Sum.full = X.full + Y.full; - - return Sum; -} - -/* Addition using two fInts */ -static fInt fSubtract (fInt X, fInt Y) -{ - fInt Difference; - - Difference.full = X.full - Y.full; - - return Difference; -} - -static bool Equal(fInt A, fInt B) -{ - if (A.full == B.full) - return true; - else - return false; -} - -static bool GreaterThan(fInt A, fInt B) -{ - if (A.full > B.full) - return true; - else - return false; -} - -static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ -{ - fInt Product; - int64_t tempProduct; - - /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/ - /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION - bool X_LessThanOne, Y_LessThanOne; - - X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0); - Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0); - - if 
(X_LessThanOne && Y_LessThanOne) { - Product.full = X.full * Y.full; - return Product - }*/ - - tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */ - tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */ - Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */ - - return Product; -} - -static fInt fDivide (fInt X, fInt Y) -{ - fInt fZERO, fQuotient; - int64_t longlongX, longlongY; - - fZERO = ConvertToFraction(0); - - if (Equal(Y, fZERO)) - return fZERO; - - longlongX = (int64_t)X.full; - longlongY = (int64_t)Y.full; - - longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */ - - div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */ - - fQuotient.full = (int)longlongX; - return fQuotient; -} - -static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ -{ - fInt fullNumber, scaledDecimal, scaledReal; - - scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */ - - scaledDecimal.full = uGetScaledDecimal(A); - - fullNumber = fAdd(scaledDecimal, scaledReal); - - return fullNumber.full; -} - -static fInt fGetSquare(fInt A) -{ - return fMultiply(A, A); -} - -/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -static fInt fSqrt(fInt num) -{ - fInt F_divide_Fprime, Fprime; - fInt test; - fInt twoShifted; - int seed, counter, error; - fInt x_new, x_old, C, y; - - fInt fZERO = ConvertToFraction(0); - - /* (0 > num) is the same as (num < 0), i.e., num is negative */ - - if (GreaterThan(fZERO, num) || Equal(fZERO, num)) - return fZERO; - - C = num; - - if (num.partial.real > 3000) - seed = 60; - else if (num.partial.real > 1000) - seed = 30; - else if (num.partial.real > 100) - seed = 10; - else - seed = 2; - - counter = 0; - - if (Equal(num, fZERO)) /*Square Root of Zero is zero */ - return fZERO; - - twoShifted = ConvertToFraction(2); - x_new = ConvertToFraction(seed); - - do { - counter++; - - x_old.full = x_new.full; - - test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */ - y = fSubtract(test, C); /*y = f(x) = x^2 - C; */ - - Fprime = fMultiply(twoShifted, x_old); - F_divide_Fprime = fDivide(y, Fprime); - - x_new = fSubtract(x_old, F_divide_Fprime); - - error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old); - - if (counter > 20) /*20 is already way too many iterations. 
If we dont have an answer by then, we never will*/ - return x_new; - - } while (uAbs(error) > 0); - - return x_new; -} - -static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) -{ - fInt *pRoots = &Roots[0]; - fInt temp, root_first, root_second; - fInt f_CONSTANT10, f_CONSTANT100; - - f_CONSTANT100 = ConvertToFraction(100); - f_CONSTANT10 = ConvertToFraction(10); - - while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { - A = fDivide(A, f_CONSTANT10); - B = fDivide(B, f_CONSTANT10); - C = fDivide(C, f_CONSTANT10); - } - - temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */ - temp = fMultiply(temp, C); /* root = 4*A*C */ - temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */ - temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */ - - root_first = fSubtract(fNegate(B), temp); /* b - Sqrt(b^2 - 4AC) */ - root_second = fAdd(fNegate(B), temp); /* b + Sqrt(b^2 - 4AC) */ - - root_first = fDivide(root_first, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_first = fDivide(root_first, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - root_second = fDivide(root_second, ConvertToFraction(2)); /* [b +- Sqrt(b^2 - 4AC)]/[2] */ - root_second = fDivide(root_second, A); /*[b +- Sqrt(b^2 - 4AC)]/[2*A] */ - - *(pRoots + 0) = root_first; - *(pRoots + 1) = root_second; -} - -/* ----------------------------------------------------------------------------- - * SUPPORT FUNCTIONS - * ----------------------------------------------------------------------------- - */ - -/* Conversion Functions */ -static int GetReal (fInt A) -{ - return (A.full >> SHIFT_AMOUNT); -} - -static fInt Divide (int X, int Y) -{ - fInt A, B, Quotient; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Quotient = fDivide(A, B); - - return Quotient; -} - -static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ -{ - int dec[PRECISION]; - int i, scaledDecimal = 0, tmp = A.partial.decimal; - - for (i = 0; i < PRECISION; i++) { - dec[i] = tmp / (1 << SHIFT_AMOUNT); - tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); - tmp *= 10; - scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i); - } - - return scaledDecimal; -} - -static int uPow(int base, int power) -{ - if (power == 0) - return 1; - else - return (base)*uPow(base, power - 1); -} - -static int uAbs(int X) -{ - if (X < 0) - return (X * -1); - else - return X; -} - -static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) -{ - fInt solution; - - solution = fDivide(A, fStepSize); - solution.partial.decimal = 0; /*All fractional digits changes to 0 */ - - if (error_term) - solution.partial.real += 1; /*Error term of 1 added */ - - solution = fMultiply(solution, fStepSize); - solution = fAdd(solution, fStepSize); - - return solution; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 79c817752a33..2b446f8866ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) return table_address; } -#if 0 -static void dump_pptable(PPTable_t *pptable) -{ - int i; - - pr_info("Version = 0x%08x\n", pptable->Version); - - pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - 
pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0); - pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau); - pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1); - pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau); - pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2); - pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau); - pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3); - pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau); - pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc); - pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau); - pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); - pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); - pr_info("ThbmLimit = %d\n", pptable->ThbmLimit); - pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit); - pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit); - pr_info("TplxLimit = %d\n", pptable->TplxLimit); - pr_info("FitLimit = %d\n", pptable->FitLimit); - - pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); - pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits); - pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit); - - pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); - pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - - pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid); - pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid); - pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - pr_info("Padding234 = 0x%02x\n", pptable->Padding234); - - pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - pr_info("[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); - - pr_info("[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 
0x%08x}\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); - - pr_info("[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); - - pr_info("[PPCLK_ECLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_ECLK].padding, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); - - pr_info("[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); - - pr_info("[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); - - pr_info("[PPCLK_DCEFCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " 
.ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCEFCLK].padding, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); - - pr_info("[PPCLK_DISPCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DISPCLK].padding, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); - - pr_info("[PPCLK_PIXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PIXCLK].padding, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); - - pr_info("[PPCLK_PHYCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_PHYCLK].padding, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); - - pr_info("[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - 
pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); - - - pr_info("FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - pr_info("FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - pr_info("FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - pr_info("FreqTableEclk\n"); - for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]); - - pr_info("FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - pr_info("FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - pr_info("FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - pr_info("FreqTableDcefclk\n"); - for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]); - - pr_info("FreqTableDispclk\n"); - for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]); - - pr_info("FreqTablePixclk\n"); - for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]); - - pr_info("FreqTablePhyclk\n"); - for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++) - pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]); - - pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]); - pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]); - pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]); - pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]); - pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]); - pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]); - pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks); - - pr_info("Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - pr_info("Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq); - pr_info("Padding789 = 0x%x\n", pptable->Padding789); - pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n", - pptable->CksVoltageOffset.a, - pptable->CksVoltageOffset.b, - pptable->CksVoltageOffset.c); - pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); - pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); - pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); - pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); - pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); 
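/*
 * A minimal sketch, not part of the patch: each FreqTable* array dumped
 * above holds one frequency in MHz per DPM level, indexed 0..N-1
 * (assuming the uint16_t per-level layout of the vega20 PPTable_t). A
 * hypothetical helper in the same style as the removed code:
 */
static void dump_freq_table(const char *name, const uint16_t *freqs,
			    int nlevels)
{
	int i;

	pr_info("%s\n", name);
	for (i = 0; i < nlevels; i++)
		pr_info("  .[%02d] = %d\n", i, freqs[i]);
}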
- pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); - pr_info("Padding456 = 0x%x\n", pptable->Padding456); - - pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv); - pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]); - pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]); - pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]); - - pr_info("PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]); - - pr_info("PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]); - - pr_info("LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]); - - pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); - pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); - pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); - - pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); - pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); - pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); - pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); - pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); - pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); - pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - - pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOn.a, - pptable->dBtcGbGfxCksOn.b, - pptable->dBtcGbGfxCksOn.c); - pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxCksOff.a, - pptable->dBtcGbGfxCksOff.b, - pptable->dBtcGbGfxCksOff.c); - pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - 
pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - pr_info("XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - pr_info("XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - pr_info("XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - pr_info("XgmiUclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]); - pr_info("XgmiSocclkFreq\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]); - pr_info("XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - - pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); - pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); - - pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm); - pr_info("padding16_Fan = %d\n", pptable->padding16_Fan); - - pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - 
pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - - pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - for (i = 0; i < 11; i++) - pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); - - for (i = 0; i < 3; i++) - pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]); - - pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); - pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - - pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V); - - pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); - pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - pr_info("SocOffset = 0x%x\n", pptable->SocOffset); - pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset); - pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset); - pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - pr_info("AcDcGpio = %d\n", pptable->AcDcGpio); - pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity); - pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); - pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - - pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); - pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - pr_info("Padding1 = 0x%x\n", pptable->Padding1); - pr_info("Padding2 = 0x%x\n", pptable->Padding2); - - pr_info("LedPin0 = %d\n", pptable->LedPin0); - pr_info("LedPin1 = %d\n", pptable->LedPin1); - pr_info("LedPin2 = %d\n", pptable->LedPin2); - pr_info("padding8_4 = 0x%x\n", pptable->padding8_4); - - pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - pr_info("I2cControllers[%d]:\n", i); - pr_info(" .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - pr_info(" 
.SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - pr_info(" .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - pr_info(" .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - pr_info(" .ThermalThrottler = %d\n", - pptable->I2cControllers[i].ThermalThrottler); - pr_info(" .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - pr_info(" .I2cSpeed = %d\n", - pptable->I2cControllers[i].I2cSpeed); - } - - for (i = 0; i < 10; i++) - pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); - - for (i = 0; i < 8; i++) - pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]); -} -#endif - static int check_powerplay_tables( struct pp_hwmgr *hwmgr, const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) @@ -652,8 +80,6 @@ static int check_powerplay_tables( return -EINVAL; } - //dump_pptable(&powerplay_table->smcPPTable); - return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 80e60ea2d11e..64f917959576 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -140,7 +140,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, min, - max); + max, + false); return ret; } @@ -251,7 +252,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu, if (atomic_read(&power_gate->vcn_gated) ^ enable) return 0; - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff); if (!ret) atomic_set(&power_gate->vcn_gated, !enable); @@ -549,7 +550,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return false; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) && + amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC)) return true; return false; @@ -741,9 +743,9 @@ static int smu_set_funcs(struct amdgpu_device *adev) return 0; } -static int smu_early_init(void *handle) +static int smu_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu; int r; @@ -825,9 +827,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu) return smu_set_config_table(smu, &adev->pm.config_table); } -static int smu_late_init(void *handle) +static int smu_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; @@ -1242,9 +1244,9 @@ static bool smu_is_workload_profile_available(struct smu_context *smu, return smu->workload_map && smu->workload_map[profile].valid_mapping; } -static int smu_sw_init(void *handle) +static int smu_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1259,26 +1261,33 @@ static int smu_sw_init(void *handle) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - else - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -1326,9 +1335,9 @@ static int smu_sw_init(void *handle) return 0; } -static int smu_sw_fini(void *handle) +static int smu_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -1799,10 +1808,10 @@ static int smu_start_smc_engine(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_hw_init(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { @@ -2021,9 +2030,9 @@ static int smu_reset_mp1_state(struct smu_context *smu) return ret; } -static int smu_hw_fini(void *handle) +static int smu_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; @@ -2054,9 +2063,9 @@ static int smu_hw_fini(void *handle) return 0; } -static void smu_late_fini(void *handle) +static void smu_late_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; kfree(smu); @@ -2065,26 +2074,31 @@ static void smu_late_fini(void *handle) static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + struct 
amdgpu_ip_block *ip_block; int ret; - ret = smu_hw_fini(adev); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); + if (!ip_block) + return -EINVAL; + + ret = smu_hw_fini(ip_block); if (ret) return ret; - ret = smu_hw_init(adev); + ret = smu_hw_init(ip_block); if (ret) return ret; - ret = smu_late_init(adev); + ret = smu_late_init(ip_block); if (ret) return ret; return 0; } -static int smu_suspend(void *handle) +static int smu_suspend(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; int ret; uint64_t count; @@ -2116,10 +2130,10 @@ static int smu_suspend(void *handle) return 0; } -static int smu_resume(void *handle) +static int smu_resume(struct amdgpu_ip_block *ip_block) { int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) @@ -2348,17 +2362,20 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); @@ -2878,6 +2895,10 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; case OD_FAN_MINIMUM_PWM: clk_type = SMU_OD_FAN_MINIMUM_PWM; break; + case OD_FAN_ZERO_RPM_ENABLE: + clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; + case OD_FAN_ZERO_RPM_STOP_TEMP: + clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; default: clk_type = SMU_CLK_COUNT; break; } @@ -3049,12 +3070,23 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (smu->user_dpm_profile.user_workload_mask & + (1 << smu->workload_priority[param[param_size]])) + return 0; + + smu->user_dpm_profile.user_workload_mask = + (1 << smu->workload_priority[param[param_size]]); + smu->workload_mask = smu->user_dpm_profile.user_workload_mask | + smu->driver_workload_mask; + ret = smu_bump_power_profile_mode(smu, param, param_size); + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..d665c47f19b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,7 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t 
clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +558,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t default_power_profile_mode; @@ -739,7 +741,7 @@ struct pptable_funcs { * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power * management. */ - int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst); /** * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power @@ -859,11 +861,6 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); /** - * @dump_pptable: Print the power play table to the system log. - */ - void (*dump_pptable)(struct smu_context *smu); - - /** * @get_power_limit: Get the device's power limits. */ int (*get_power_limit)(struct smu_context *smu, @@ -1260,7 +1257,8 @@ struct pptable_funcs { * @set_soft_freq_limited_range: Set the soft frequency range of a clock * domain in MHz. */ - int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, + bool automatic); /** * @set_power_source: Notify the SMU of the current power source. diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 822c6425d90e..0f96b8c59a0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xD +#define SMU_METRICS_TABLE_VERSION 0xE typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -231,6 +231,9 @@ typedef struct __attribute__((packed, aligned(4))) { // PER XCD ACTIVITY uint32_t GfxBusy[8]; uint64_t GfxBusyAcc[8]; + + //PCIE BW Data and error count + uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index e71a721c12b9..a299dc4a8071 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -313,6 +313,8 @@ enum smu_clk_type { SMU_OD_ACOUSTIC_TARGET, SMU_OD_FAN_TARGET_TEMPERATURE, SMU_OD_FAN_MINIMUM_PWM, + SMU_OD_FAN_ZERO_RPM_ENABLE, + SMU_OD_FAN_ZERO_RPM_STOP_TEMP, SMU_CLK_COUNT, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index c2ab336bb530..ed8304d82831 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int 
smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h index 1ad2dff71090..0886d8cffbd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h @@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu); int smu_v12_0_mode2_reset(struct smu_context *smu); int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v12_0_set_driver_table_location(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index e58220a7ee2f..ae3563d71fa0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -219,7 +219,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -255,7 +255,8 @@ int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v13_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 727d5b405435..0546b02e198d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -186,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t *min, uint32_t *max); int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool automatic); int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -210,7 +210,8 @@ int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable); + bool enable, + int inst); int smu_v14_0_set_jpeg_enable(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..4b36c230e43a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { if (size != 10) @@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } @@ -1559,437 +1558,6 @@ static int arcturus_set_performance_level(struct smu_context 
*smu, return smu_v11_0_set_performance_level(smu, level); } -static void arcturus_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]); - } - - dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc); - dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); - dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx); - dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); - - dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit); - dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit); - dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit); - dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); - dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit); - dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit); - dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit); - - dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit); - dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); - - dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding); - - dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); - dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]); - dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]); - dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - 
pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK].padding, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK].padding, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - 
pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); - dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]); - dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]); - dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]); - dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]); - dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456); - - dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm); - dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); - dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); - dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); - - 
dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge); - dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot); - dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx); - dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc); - dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem); - dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm); - - dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); - dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); - dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); - - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxAfll.a, - pptable->dBtcGbGfxAfll.b, - pptable->dBtcGbGfxAfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 
0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv); - - dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1); - dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2); - - dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow); - dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", 
pptable->MaxVoltageStepGfx); - dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); - - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping); - dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping); - - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent); - dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset); - dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem); - - dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent); - dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset); - dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput); - - dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); - dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); - dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = %d\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = %d\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = %d\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = %d\n", - 
pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = %d\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .Speed = %d\n", - pptable->I2cControllers[i].Speed); - } - - dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth); - - dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); - -} - static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; @@ -2002,7 +1570,9 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -2365,8 +1935,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, .set_performance_level = arcturus_set_performance_level, - /* debug (internal used) */ - .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..211635dabed8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1135,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) return 0; } -static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int navi10_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1689,7 +1691,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return 0; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return 0; break; @@ -2081,10 +2083,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..d0ed0d060a8a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1152,7 +1152,9 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) return 0; } -static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; @@ -1469,7 +1471,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; - ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto forec_level_out; break; @@ -1786,10 +1788,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } @@ -2493,1274 +2498,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) return val != 0x0; } -static void beige_goby_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_beige_goby_t *pptable = table_context->driver_pptable; - int i; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - 
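The three dpm_set_vcn_enable() hunks above (arcturus, navi10, sienna_cichlid) widen the callback to take an instance index, so VCN power gating can be driven per instance rather than for all instances at once. A minimal sketch of a caller for the new signature, assuming the instance count lives in adev->vcn.num_vcn_inst as elsewhere in amdgpu and that the callback is reached through smu->ppt_funcs; the wrapper name is illustrative:

	/* Hedged sketch: apply the per-instance callback to every VCN instance. */
	static int smu_set_vcn_enable_all(struct smu_context *smu, bool enable)
	{
		struct amdgpu_device *adev = smu->adev;
		int i, ret;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			/* new third argument: the VCN instance to gate/ungate */
			ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
			if (ret)
				return ret;
		}

		return 0;
	}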
dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - 
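The set_power_profile_mode() hunks above stop sending a single-bit 1 << workload_type and instead pass the accumulated smu->workload_mask, recording the active profile via smu_cmn_assign_power_profile() only when the message succeeds. The practical effect is that more than one workload bit can be in flight at once. A sketch of the intended call pattern; the |= accumulation step is an assumption about how the mask is built elsewhere:

	/* Sketch, assuming smu->workload_mask collects one bit per requested
	 * profile before SMU_MSG_SetWorkloadMask is sent. */
	smu->workload_mask |= 1 << workload_type;	/* request one more profile */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					      smu->workload_mask, NULL);
	if (!ret)
		smu_cmn_assign_power_profile(smu);	/* commit on success only */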
dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - 
pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 
0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", 
pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, 
"OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", 
pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - 
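Both force_clk_levels() hunks above grow a trailing boolean on smu_v11_0_set_soft_freq_limited_range(), and the user-forced min/max path passes false. The flag is assumed here to distinguish a driver-chosen automatic range from an explicitly forced one; the two call styles side by side, with the second call site hypothetical:

	/* Explicit user-forced range, as in the force_clk_levels hunks: */
	ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type,
						    min_freq, max_freq, false);

	/* Assumed counterpart for a driver-chosen default range
	 * (hypothetical call site, flag meaning inferred): */
	ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type,
						    min_freq, max_freq, true);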
dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", 
pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); 
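The bulk of this patch deletes the per-ASIC dump_pptable() implementations, along with the .dump_pptable slot in arcturus_ppt_funcs further up; each body was hundreds of near-identical dev_info() lines like the ones being removed here. Were such a dump ever wanted again, a table-driven form would keep it to one descriptor per field. The helper below is purely illustrative and not part of the patch; it assumes scalar little-endian fields of at most 32 bits:

	/* Hypothetical helper: describe scalar pptable fields once, then
	 * print them in a loop instead of one dev_info() per field. */
	struct pp_field {
		const char *name;
		size_t offset;
		size_t size;
	};

	#define PP_FIELD(type, member) \
		{ #member, offsetof(type, member), sizeof(((type *)0)->member) }

	static void dump_pp_fields(struct smu_context *smu, const void *pptable,
				   const struct pp_field *fields, size_t count)
	{
		size_t i;

		for (i = 0; i < count; i++) {
			u32 v = 0;

			/* little-endian read, adequate for a sketch */
			memcpy(&v, (const u8 *)pptable + fields[i].offset,
			       fields[i].size);
			dev_info(smu->adev->dev, "%s = 0x%x\n",
				 fields[i].name, v);
		}
	}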
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - -static void sienna_cichlid_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - int i; - - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(11, 0, 13)) { - beige_goby_dump_pptable(smu); - return; - } - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); - - for (i = 0; i < PPT_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]); - dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]); - } - - for (i = 0; i < TDC_THROTTLER_COUNT; i++) { - dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]); - dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]); - } - - for (i = 0; i < TEMP_COUNT; i++) { - dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]); - } - - dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit); - dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig); - dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]); - dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]); - dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]); - - dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit); - for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) { - dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]); - dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]); - } - dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask); - - dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask); - - dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc); - dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx); - dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx); - dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc); - - dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin); - dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin); - - dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold); - dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]); - dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]); - - dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx); - dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc); - dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", 
pptable->MaxVoltageGfx); - dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc); - - dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx); - dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc); - - dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin); - dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin); - dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp); - dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp); - dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis); - dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis); - - dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_UCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_UCLK].Padding, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, - 
pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_UCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_FCLK]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, - pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_FCLK].Padding, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, - pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, - pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, - pptable->DpmDescriptor[PPCLK_FCLK].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a, 
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16); - - dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n" - " .VoltageMode = 0x%02x\n" - " .SnapToDiscrete = 0x%02x\n" - " .NumDiscreteLevels = 0x%02x\n" - " .padding = 0x%02x\n" - " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" - " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" - " .SsFmin = 0x%04x\n" - " .Padding_16 = 0x%04x\n", - pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode, - pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete, - pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m, - pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c, - pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin, - pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16); - - dev_info(smu->adev->dev, "FreqTableGfx\n"); - for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]); - - dev_info(smu->adev->dev, "FreqTableVclk\n"); - for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]); - - dev_info(smu->adev->dev, "FreqTableDclk\n"); - for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]); - - dev_info(smu->adev->dev, "FreqTableSocclk\n"); - for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]); - - dev_info(smu->adev->dev, "FreqTableUclk\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]); - - dev_info(smu->adev->dev, "FreqTableFclk\n"); - for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]); - - dev_info(smu->adev->dev, "DcModeMaxFreq\n"); - dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); - dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); - dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); - dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]); - dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]); - dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]); - - dev_info(smu->adev->dev, "FreqTableUclkDiv\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]); - - dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq); - dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding); - - dev_info(smu->adev->dev, "Mp0clkFreq\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]); - - dev_info(smu->adev->dev, "Mp0DpmVoltage\n"); - for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) - 
dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]); - - dev_info(smu->adev->dev, "MemVddciVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]); - - dev_info(smu->adev->dev, "MemMvddVoltage\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]); - - dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry); - dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit); - dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); - dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource); - dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding); - - dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask); - - dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask); - dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]); - dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow); - dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]); - dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]); - dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]); - dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]); - dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt); - dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt); - dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt); - - dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage); - dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime); - dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime); - dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum); - dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis); - dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout); - - dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]); - dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]); - dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]); - dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]); - dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]); - - dev_info(smu->adev->dev, "FlopsPerByteTable\n"); - for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]); - - dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv); - dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]); - dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]); - dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]); - - dev_info(smu->adev->dev, "UclkDpmPstates\n"); - for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]); - - dev_info(smu->adev->dev, 
"UclkDpmSrcFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmSrcFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n"); - dev_info(smu->adev->dev, " .Fmin = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmin); - dev_info(smu->adev->dev, " .Fmax = 0x%x\n", - pptable->UclkDpmTargFreqRange.Fmax); - dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq); - dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding); - - dev_info(smu->adev->dev, "PcieGenSpeed\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]); - - dev_info(smu->adev->dev, "PcieLaneCount\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]); - - dev_info(smu->adev->dev, "LclkFreq\n"); - for (i = 0; i < NUM_LINK_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]); - - dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp); - dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp); - - dev_info(smu->adev->dev, "FanGain\n"); - for (i = 0; i < TEMP_COUNT; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]); - - dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin); - dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm); - dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm); - dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm); - dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm); - dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature); - dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk); - dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16); - dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect); - dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding); - dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable); - dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev); - - dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta); - dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved); - - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect); - dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs); - - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, - pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, - pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, - 
pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); - dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxPll.a, - pptable->dBtcGbGfxPll.b, - pptable->dBtcGbGfxPll.c); - dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbGfxDfll.a, - pptable->dBtcGbGfxDfll.b, - pptable->dBtcGbGfxDfll.c); - dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->dBtcGbSoc.a, - pptable->dBtcGbSoc.b, - pptable->dBtcGbSoc.c); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, - pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); - dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", - pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, - pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); - - dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n"); - for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) { - dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]); - dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n", - i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]); - } - - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); - dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, - pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); - - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); - dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); - - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); - dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - - dev_info(smu->adev->dev, "XgmiDpmPstates\n"); - for (i = 0; i < NUM_XGMI_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]); - dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); - dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); - - dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides); - dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation0.a, - pptable->ReservedEquation0.b, - pptable->ReservedEquation0.c); - dev_info(smu->adev->dev, "ReservedEquation1{a 
= 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation1.a, - pptable->ReservedEquation1.b, - pptable->ReservedEquation1.c); - dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation2.a, - pptable->ReservedEquation2.b, - pptable->ReservedEquation2.c); - dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", - pptable->ReservedEquation3.a, - pptable->ReservedEquation3.b, - pptable->ReservedEquation3.c); - - dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]); - dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]); - dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]); - dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]); - dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]); - dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]); - dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]); - dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]); - - dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]); - dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]); - dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]); - dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]); - dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]); - dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]); - - for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { - dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i); - dev_info(smu->adev->dev, " .Enabled = 0x%x\n", - pptable->I2cControllers[i].Enabled); - dev_info(smu->adev->dev, " .Speed = 0x%x\n", - pptable->I2cControllers[i].Speed); - dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n", - pptable->I2cControllers[i].SlaveAddress); - dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n", - pptable->I2cControllers[i].ControllerPort); - dev_info(smu->adev->dev, " .ControllerName = 0x%x\n", - pptable->I2cControllers[i].ControllerName); - dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n", - pptable->I2cControllers[i].ThermalThrotter); - dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n", - pptable->I2cControllers[i].I2cProtocol); - dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n", - pptable->I2cControllers[i].PaddingConfig); - } - - dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl); - dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda); - dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr); - dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]); - - dev_info(smu->adev->dev, "Board Parameters:\n"); - dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); - dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); - dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); - dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); - dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask); - dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask); - - 
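/* Editor's note: the long run of removed dev_info() lines above and below is
 * the body of sienna_cichlid_dump_pptable() being deleted in full; the
 * matching ".dump_pptable" initializer disappears from
 * sienna_cichlid_ppt_funcs at the end of this hunk, and the same removal is
 * repeated further down for smu_v13_0_0_ppt.c and smu_v13_0_7_ppt.c. A
 * minimal sketch of the optional-callback pattern being retired (the wrapper
 * name and NULL check here are illustrative, not quoted from the kernel):
 */

#include <stddef.h>

struct smu_context;			/* opaque stand-in for the real type */

struct ppt_funcs_sketch {
	/* optional debug hook; drivers may leave it NULL */
	void (*dump_pptable)(struct smu_context *smu);
};

static void maybe_dump_pptable(struct smu_context *smu,
			       const struct ppt_funcs_sketch *funcs)
{
	/* Because optional hooks are NULL-checked at the call site, removing
	 * the implementation and its initializer is the whole cleanup. */
	if (funcs && funcs->dump_pptable)
		funcs->dump_pptable(smu);
}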
dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); - dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset); - dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); - - dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); - dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset); - dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); - - dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); - dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); - - dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); - dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset); - dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); - - dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio); - - dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio); - dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity); - dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio); - dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity); - dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio); - dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity); - dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio); - dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity); - dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0); - dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1); - dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2); - dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask); - dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie); - dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError); - dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]); - dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]); - - dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled); - dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent); - dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq); - - dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding); - dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq); - - dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled); - dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent); - dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq); - - dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled); - dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth); - dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]); - dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]); - 
dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]); - - dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower); - dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding); - - dev_info(smu->adev->dev, "XgmiLinkSpeed\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]); - dev_info(smu->adev->dev, "XgmiLinkWidth\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]); - dev_info(smu->adev->dev, "XgmiFclkFreq\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]); - dev_info(smu->adev->dev, "XgmiSocVoltage\n"); - for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) - dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]); - - dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled); - dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled); - dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]); - dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]); - - dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]); - dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]); - dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]); - dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]); - dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]); - dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]); - dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]); - dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]); - dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]); - dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]); - dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]); - - dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]); - dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]); - dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]); - dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]); - dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]); - dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]); - dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]); - dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]); -} - static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { @@ -4397,7 +3134,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch, .get_power_limit = sienna_cichlid_get_power_limit, .update_pcie_parameters = sienna_cichlid_update_pcie_parameters, - .dump_pptable = sienna_cichlid_dump_pptable, .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, .fini_microcode = smu_v11_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 
16fcd9dcd202..480cf3cb204d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1616,7 +1616,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) break; default: if (!ras || !adev->ras_enabled || - adev->gmc.xgmi.pending_reset) { + (adev->init_lvl->level == + AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); @@ -1763,7 +1764,8 @@ failed: int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1778,7 +1780,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1786,7 +1791,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1854,6 +1862,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, uint32_t mclk_min = 0, mclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; int ret = 0; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1873,6 +1882,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, mclk_max = mem_table->max; socclk_min = soc_table->min; socclk_max = soc_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1905,13 +1915,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { mclk_min = mclk_max = 0; socclk_min = socclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; } @@ -1920,7 +1932,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; } @@ -1929,7 +1942,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 1fe020f1f4db..f89c487dce72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -461,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu) return smu_v11_0_init_smc_tables(smu); } -static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -1079,7 +1081,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long 
*input, } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1087,15 +1089,16 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { int ret = 0; @@ -1301,7 +1304,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1337,7 +1340,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; @@ -1356,7 +1359,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false); if (ret) return ret; @@ -1364,7 +1367,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false); if (ret) return ret; @@ -1372,7 +1375,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false); if (ret) return ret; @@ -1380,7 +1383,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq); + ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..75a9ea87f419 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -645,7 +645,9 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context return pm_type; } -static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int renoir_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -707,7 +709,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? 
max_freq : min_freq; - ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -740,7 +742,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -890,14 +892,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } @@ -911,7 +913,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -919,7 +921,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -961,13 +963,13 @@ static int renior_set_dpm_profile_freq(struct smu_context *smu, } if (sclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false); if (socclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false); if (fclk) - ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk); + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index ed15f5a0fd11..3d3cd546f0ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu) } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool automatic) { int ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 2c35eb31475a..f6b029354327 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1297,9 +1297,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu, } static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1328,7 +1329,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, return 0; ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, - min, 
max); + min, max, false); if (!ret) { pstate_table->gfxclk_pstate.curr.min = min; pstate_table->gfxclk_pstate.curr.max = max; @@ -1348,7 +1349,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, /* Restore default min/max clocks and enable determinism */ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); if (!ret) { usleep_range(500, 1000); ret = smu_cmn_send_smc_msg_with_param(smu, @@ -1422,7 +1423,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1441,7 +1442,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); } break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index e17466cc1952..2bfea740dace 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1608,7 +1608,8 @@ failed: int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1623,7 +1624,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1631,7 +1635,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1708,6 +1715,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1739,6 +1747,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1780,13 +1789,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, vclk_min = vclk_max = 0; dclk_min = dclk_max = 0; fclk_min = fclk_max = 0; + auto_level = false; } if (sclk_min && sclk_max) { ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + 
auto_level); if (ret) return ret; @@ -1798,7 +1809,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1810,7 +1822,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1825,7 +1838,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1840,7 +1854,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, i ? SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1852,7 +1867,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -2088,7 +2104,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) } int smu_v13_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index d53e162dcd8d..80c6b1e523aa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -107,6 +107,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_0_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static int smu_v13_0_0_system_features_control(struct smu_context *smu, bool en) { @@ -1143,6 +1132,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1463,6 +1460,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += 
sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1560,6 +1593,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1853,6 +1896,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_0_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_0_od_restore_table_single(smu, input[0]); @@ -1975,7 +2060,8 @@ static int 
smu_v13_0_0_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2122,7 +2208,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) @@ -2188,6 +2278,10 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_0_set_supported_od_feature_mask(smu); @@ -2485,7 +2579,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; - u32 workload_mask, selected_workload_mask; + u32 workload_mask; smu->power_profile_mode = input[size]; @@ -2552,7 +2646,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - selected_workload_mask = workload_mask = 1 << workload_type; + workload_mask = 1 << workload_type; /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && @@ -2567,12 +2661,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask |= 1 << workload_type; } + smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); - if (!ret) - smu->workload_mask = selected_workload_mask; + if (!ret) { + smu_cmn_assign_power_profile(smu); + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? 
PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } return ret; } @@ -3024,7 +3128,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .i2c_init = smu_v13_0_0_i2c_control_init, .i2c_fini = smu_v13_0_0_i2c_control_fini, .is_dpm_running = smu_v13_0_0_is_dpm_running, - .dump_pptable = smu_v13_0_0_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 9c2c43bfed0b..f5db181ef489 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en) return ret; } -static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -811,9 +813,10 @@ failed: } static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -950,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto force_level_out; break; @@ -1046,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1060,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1069,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 55ed6247eb61..fa30a9e1f27a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -102,6 +102,24 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define MCA_BANK_IPID(_ip, _hwid, _type) \ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } +static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu) +{ + return (smu->adev->flags & AMD_IS_APU) && + smu->smc_fw_version <= 0x4556900; +} + +static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu) +{ + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + return smu->smc_fw_version >= 0x557600; + case IP_VERSION(13, 0, 14): + return smu->smc_fw_version >= 0x05550E00; + default: + return false; + } +} + struct mca_bank_ipid { enum amdgpu_mca_ip ip; uint16_t hwid; @@ -253,7 +271,7 @@ struct PPTable_t { #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) 
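/* Editor's note: the SMUQ10_* helpers decode the PMFW's unsigned Q10.10
 * fixed-point metrics: the bits above the low 10 are the integer part, the
 * low 10 bits the fraction, and SMUQ10_ROUND() (defined just below) rounds
 * half-up by adding 1 whenever the fraction is >= 0x200, i.e. >= 0.5. The
 * GET_METRIC_FIELD() change that follows swaps the implicit
 * (adev->flags & AMD_IS_APU) test for an explicit flag computed once per
 * call chain via smu_v13_0_6_is_unified_metrics(). A standalone worked
 * example of the Q10 rounding (plain C, independent of the macros here):
 */

#include <stdint.h>
#include <stdio.h>

static uint32_t q10_round(uint32_t x)
{
	/* integer part, plus one when the 10-bit fraction is at least 1/2 */
	return (x >> 10) + ((x & 0x3ff) >= 0x200);
}

int main(void)
{
	uint32_t v = 0x2a33;		/* 10 + 563/1024 ~= 10.55 in Q10.10 */

	printf("%u\n", q10_round(v));	/* prints 11: the .55 rounds up */
	return 0;
}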
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) -#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ +#define GET_METRIC_FIELD(field, flag) ((flag) ?\ (metrics_a->field) : (metrics_x->field)) struct smu_v13_0_6_dpm_map { @@ -352,7 +370,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_6); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -583,7 +601,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - struct amdgpu_device *adev = smu->adev; + bool flag = smu_v13_0_6_is_unified_metrics(smu); int ret, i, retry = 100; uint32_t table_version; @@ -595,7 +613,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) return ret; /* Ensure that metrics have been updated */ - if (GET_METRIC_FIELD(AccumulationCounter)) + if (GET_METRIC_FIELD(AccumulationCounter, flag)) break; usleep_range(1000, 1100); @@ -612,29 +630,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) table_version; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); + SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - GET_METRIC_FIELD(SocclkFrequencyTable)[i]); + GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; + pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0]; pptable->Init = true; } @@ -957,6 +975,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -971,50 +990,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_GFXCLK: if 
(smu->smc_fw_version >= 0x552F00) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -1739,7 +1758,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, if (uclk_table->max != pstate_table->uclk_pstate.curr.max) { /* Min UCLK is not expected to be changed */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, uclk_table->max); + smu, SMU_UCLK, 0, uclk_table->max, false); if (ret) return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; @@ -1758,7 +1777,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, + bool automatic) { struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; @@ -1806,7 +1826,7 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( - smu, SMU_UCLK, 0, max); + smu, SMU_UCLK, 0, max, false); if (!ret) pstate_table->uclk_pstate.curr.max = max; } @@ -1946,7 +1966,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = dpm_context->dpm_tables.gfx_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1954,7 +1974,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.uclk_table.min; max_clk = dpm_context->dpm_tables.uclk_table.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; pstate_table->uclk_pstate.custom.max = 0; @@ -1978,7 +1998,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, max_clk = pstate_table->gfxclk_pstate.custom.max; ret = smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_GFXCLK, min_clk, max_clk); + smu, SMU_GFXCLK, min_clk, max_clk, false); if (ret) return ret; @@ -1989,7 +2009,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->uclk_pstate.curr.min; max_clk = pstate_table->uclk_pstate.custom.max; return smu_v13_0_6_set_soft_freq_limited_range( - smu, SMU_UCLK, min_clk, max_clk); + smu, SMU_UCLK, min_clk, max_clk, false); } break; default: @@ -2299,14 +2319,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { + bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst; struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_5 *gpu_metrics = - (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_6 *gpu_metrics = + (struct gpu_metrics_v1_6 *)smu_table->gpu_metrics_table; + bool flag = smu_v13_0_6_is_unified_metrics(smu); + int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, i, j; MetricsTableX_t *metrics_x; MetricsTableA_t *metrics_a; + struct amdgpu_xcp *xcp; u16 link_width_level; + u32 inst_mask; metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); ret = 
smu_v13_0_6_get_metrics_table(smu, metrics_x, true); @@ -2317,53 +2341,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_a = (MetricsTableA_t *)metrics_x; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 6); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag)); + + /* Total accumulated cycle counter */ + gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag); - /* Throttle status is not reported through metrics now */ - gpu_metrics->throttle_status = 0; + /* Accumulated throttler residencies */ + gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag); + gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag); + gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag); + gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag); + gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag); /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { /*Check smu version, PCIE link speed and width will be reported from pmfw metric @@ -2399,41 +2430,77 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics_x->PCIeNAKSentCountAcc; gpu_metrics->pcie_nak_rcvd_count_acc = metrics_x->PCIeNAKReceivedCountAcc; + if (smu_v13_0_6_is_other_end_count_available(smu)) + gpu_metrics->pcie_lc_perf_other_end_recovery = + metrics_x->PCIeOtherEndRecoveryAcc; + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag)); for (i = 0; i < NUM_XGMI_LINKS; i++) { gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]); gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); - } + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]); + } + + gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; + + apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00); + smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 6)) && + (smu->smc_fw_version >= 0x556F00); + smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) && + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 14)) && + (smu->smc_fw_version >= 0x05550B00); + + per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst; + + for_each_xcp(adev->xcp_mgr, xcp, i) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + gpu_metrics->xcp_stats[i].jpeg_busy + [(idx * adev->jpeg.num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag) + [(inst * adev->jpeg.num_jpeg_rings) + j]); + } + gpu_metrics->xcp_stats[i].vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]); + idx++; - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - inst = GET_INST(JPEG, i); - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { - gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) - [(inst * adev->jpeg.num_jpeg_rings) + j]); } - } - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - inst = GET_INST(VCN, i); - gpu_metrics->vcn_activity[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = + SMUQ10_ROUND(metrics_x->GfxBusy[inst]); + gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = + SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]); + idx++; + } + } } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); + gpu_metrics->xgmi_link_width = 
SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag)); + gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag)); - gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag); *table = (void *)gpu_metrics; kfree(metrics_x); @@ -2974,6 +3041,16 @@ static int mmhub_err_codes[] = { CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, }; +static int vcn_err_codes[] = { + CODE_VIDD, CODE_VIDV, +}; +static int jpeg_err_codes[] = { + CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D, + CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D, + CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D, + CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D, +}; + static const struct mca_ras_info mca_ras_table[] = { { .blkid = AMDGPU_RAS_BLOCK__UMC, @@ -3002,6 +3079,20 @@ static const struct mca_ras_info mca_ras_table[] = { .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL, .ip = AMDGPU_MCA_IP_PCS_XGMI, .get_err_count = mca_pcs_xgmi_mca_get_err_count, + }, { + .blkid = AMDGPU_RAS_BLOCK__VCN, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = vcn_err_codes, + .err_code_count = ARRAY_SIZE(vcn_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, + }, { + .blkid = AMDGPU_RAS_BLOCK__JPEG, + .ip = AMDGPU_MCA_IP_SMU, + .err_code_array = jpeg_err_codes, + .err_code_count = ARRAY_SIZE(jpeg_err_codes), + .get_err_count = mca_smu_mca_get_err_count, + .bank_is_valid = mca_smu_bank_is_valid, }, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b891a5e0a396..c5d3e25cc967 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -83,6 +83,8 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 +#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12 #define LINK_SPEED_MAX 3 @@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v13_0_7_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - SkuTable_t *skutable = &pptable->SkuTable; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); -} - static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1132,6 +1121,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; + case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP: + od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp; + od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1452,6 +1449,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + 
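/* Editor's note: these SMU_OD_FAN_ZERO_RPM_ENABLE / _STOP_TEMP cases (added
 * here and, in the matching hunks earlier, to smu_v13_0_0_ppt.c) expose the
 * fan zero-RPM knobs through the overdrive sysfs interface: the print path
 * emits the current value plus an OD_RANGE line, while the edit path below
 * clamps input[0] against the limits read from the overdrive lower/upper
 * tables, latches the value, and sets PP_OD_FEATURE_ZERO_FAN_BIT in
 * FeatureCtrlMask. A condensed, self-contained sketch of that edit flow
 * (error values and the bit position are stand-ins, not the kernel's):
 */

#include <stdbool.h>
#include <stdint.h>

#define SK_EINVAL	22	/* stand-in for -EINVAL */
#define SK_ENOTSUPP	524	/* stand-in for -ENOTSUPP */
#define SK_ZERO_FAN_BIT	4	/* stand-in for PP_OD_FEATURE_ZERO_FAN_BIT */

struct od_sketch {
	uint32_t fan_zero_rpm_enable;
	uint32_t feature_ctrl_mask;
};

static int edit_zero_rpm_enable(struct od_sketch *od, long value,
				int32_t od_min, int32_t od_max,
				bool zero_fan_supported)
{
	if (!zero_fan_supported)
		return -SK_ENOTSUPP;	/* feature bit not advertised by PMFW */
	if (value < od_min || value > od_max)
		return -SK_EINVAL;	/* outside the advertised OD_RANGE */

	od->fan_zero_rpm_enable = (uint32_t)value;
	od->feature_ctrl_mask |= 1u << SK_ZERO_FAN_BIT;
	return 0;
}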
case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmStopTemp); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1548,6 +1581,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + od_table->OverDriveTable.FanZeroRpmStopTemp = + boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; default: dev_info(adev->dev, "Invalid table index: %ld\n", input); return -EINVAL; @@ -1841,6 +1884,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + + case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP: + if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v13_0_7_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmStopTemp = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case 
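/*
 * Both zero-RPM edit cases above follow the same pattern as the other
 * fan settings: reject the write unless PP_OD_FEATURE_ZERO_FAN_BIT is
 * supported, range-check input[0] against the limits reported by
 * smu_v13_0_7_get_od_setting_limits(), stash the value in od_table,
 * and set PP_OD_FEATURE_ZERO_FAN_BIT in FeatureCtrlMask so that only
 * the touched feature is applied when the table is committed.
 */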
PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v13_0_7_od_restore_table_single(smu, input[0]); @@ -1964,7 +2049,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, ret = smu_v13_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -2106,7 +2192,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET; } static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) @@ -2172,6 +2262,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; + user_od_table->OverDriveTable.FanZeroRpmStopTemp = + user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp; } smu_v13_0_7_set_supported_od_feature_mask(smu); @@ -2499,13 +2593,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); else - smu->workload_mask = (1 << workload_type); + smu_cmn_assign_power_profile(smu); return ret; } @@ -2605,7 +2700,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, .is_dpm_running = smu_v13_0_7_is_dpm_running, - .dump_pptable = smu_v13_0_7_dump_pptable, .init_microcode = smu_v13_0_init_microcode, .load_microcode = smu_v13_0_load_microcode, .fini_microcode = smu_v13_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 260c339f89c5..73b4506ef5a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en) return ret; } -static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable) +static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, + bool enable, + int inst) { int ret = 0; @@ -945,9 +947,10 @@ failed: } static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max, + bool automatic) { enum smu_message_type msg_set_min, msg_set_max; uint32_t min_clk = min; @@ -1134,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu, if (ret) goto force_level_out; - ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) goto 
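/*
 * The extra "false" here is the new "automatic" argument threaded
 * through the set_soft_freq_limited_range() implementations in this
 * series; false keeps the old behaviour of programming the caller's
 * explicit min/max pair.
 */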
force_level_out; break; @@ -1254,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (sclk_min && sclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SCLK, - sclk_min, - sclk_max); + SMU_SCLK, + sclk_min, + sclk_max, + false); if (ret) return ret; @@ -1266,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, if (fclk_min && fclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_FCLK, - fclk_min, - fclk_max); + SMU_FCLK, + fclk_min, + fclk_max, + false); if (ret) return ret; } if (socclk_min && socclk_max) { ret = yellow_carp_set_soft_freq_limited_range(smu, - SMU_SOCCLK, - socclk_min, - socclk_max); + SMU_SOCCLK, + socclk_min, + socclk_max, + false); if (ret) return ret; } @@ -1286,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1295,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu, ret = yellow_carp_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 865e916fc425..ecb0164d533e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1102,7 +1102,8 @@ failed: int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, - uint32_t max) + uint32_t max, + bool automatic) { int ret = 0, clk_id = 0; uint32_t param; @@ -1117,7 +1118,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0xffff); + else + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param, NULL); if (ret) @@ -1125,7 +1129,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, } if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + if (automatic) + param = (uint32_t)((clk_id << 16) | 0); + else + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, param, NULL); if (ret) @@ -1202,6 +1209,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, uint32_t dclk_min = 0, dclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; int ret = 0, i; + bool auto_level = false; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1233,6 +1241,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, dclk_max = dclk_table->max; fclk_min = fclk_table->min; fclk_max = fclk_table->max; + auto_level = true; break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; @@ -1268,7 +1277,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, sclk_min, - sclk_max); + sclk_max, + auto_level); if (ret) return ret; @@ -1280,7 +1290,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_MCLK, mclk_min, - mclk_max); + mclk_max, + auto_level); if (ret) return ret; @@ -1292,7 +1303,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = 
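/*
 * With automatic set, the smu_v14_0 encoding above pins the soft range
 * wide open instead of using the caller's values: the low 16 bits of
 * the SetSoftMaxByFreq parameter become 0xffff and SetSoftMinByFreq
 * gets 0, i.e. param = (clk_id << 16) | 0xffff and (clk_id << 16) | 0,
 * leaving the firmware free to manage the whole frequency range when
 * AMD_DPM_FORCED_LEVEL_AUTO sets auto_level below.
 */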
smu_v14_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + auto_level); if (ret) return ret; @@ -1307,7 +1319,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_VCLK1 : SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + auto_level); if (ret) return ret; } @@ -1322,7 +1335,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, i ? SMU_DCLK1 : SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + auto_level); if (ret) return ret; } @@ -1334,7 +1348,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + auto_level); if (ret) return ret; @@ -1492,7 +1507,8 @@ int smu_v14_0_set_single_dpm_table(struct smu_context *smu, } int smu_v14_0_set_vcn_enable(struct smu_context *smu, - bool enable) + bool enable, + int inst) { struct amdgpu_device *adev = smu->adev; int i, ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 1e16a281f2dc..59b369eff30f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -670,19 +670,6 @@ static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static void smu_v14_0_2_dump_pptable(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; - - dev_info(smu->adev->dev, "Dumped PPTable:\n"); - - dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); - dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); - dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); -} - static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) { uint32_t throttler_status = 0; @@ -1457,7 +1444,8 @@ static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, ret = smu_v14_0_set_soft_freq_limited_range(smu, clk_type, min_freq, - max_freq); + max_freq, + false); break; case SMU_DCEFCLK: case SMU_PCIE: @@ -1807,12 +1795,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, NULL); + if (!ret) - smu->workload_mask = 1 << workload_type; + smu_cmn_assign_power_profile(smu); return ret; } @@ -2726,7 +2713,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .i2c_init = smu_v14_0_2_i2c_control_init, .i2c_fini = smu_v14_0_2_i2c_control_fini, .is_dpm_running = smu_v14_0_2_is_dpm_running, - .dump_pptable = smu_v14_0_2_dump_pptable, .init_microcode = smu_v14_0_init_microcode, .load_microcode = smu_v14_0_load_microcode, .fini_microcode = smu_v14_0_fini_microcode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..f1ab1a6bb467 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1078,6 +1078,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(1, 5): structure_size = sizeof(struct 
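/*
 * METRICS_VERSION(1, 6) below sizes the new gpu_metrics_v1_6 layout,
 * presumably the one carrying the per-partition xcp_stats and the PCIe
 * other-end-recovery counter filled in by smu_v13_0_6_get_gpu_metrics()
 * above (an inference from this series, not stated in the hunk itself).
 */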
gpu_metrics_v1_5); break; + case METRICS_VERSION(1, 6): + structure_size = sizeof(struct gpu_metrics_v1_6); + break; case METRICS_VERSION(2, 0): structure_size = sizeof(struct gpu_metrics_v2_0); break; @@ -1138,6 +1141,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } +void smu_cmn_assign_power_profile(struct smu_context *smu) +{ + uint32_t index; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; +} + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) { struct pci_dev *p = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..8a801e389659 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +void smu_cmn_assign_power_profile(struct smu_context *smu); + /* * Helper function to make sysfs_emit_at() happy. Align buf to * the current page boundary and record the offset. diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 6f4d212607d7..c09ecf1a68a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -78,7 +78,6 @@ #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu) #define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max) #define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level) -#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu) #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap) #define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src) #define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu) diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index ddf20708370f..c901ac00c0c3 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,6 +6,7 @@ config DRM_HDLCD tristate "ARM HDLCD" depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER help @@ -27,6 +28,7 @@ config DRM_MALI_DISPLAY tristate "ARM Mali Display Processor" depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig index 4acc4285a4eb..415c10a6374b 100644 --- a/drivers/gpu/drm/arm/display/Kconfig +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -3,6 +3,7 @@ config DRM_KOMEDA tristate "ARM Komeda display driver" depends on DRM && OF depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 55c3773befde..6d475bb34002 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -9,7 +9,7 @@ #include <linux/of.h> #include 
<linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_fbdev_dma.h> +#include <drm/drm_client_setup.h> #include <drm/drm_module.h> #include <drm/drm_of.h> #include "komeda_dev.h" @@ -84,7 +84,7 @@ static int komeda_platform_probe(struct platform_device *pdev) } dev_set_drvdata(dev, mdrv); - drm_fbdev_dma_setup(&mdrv->kms->base, 32); + drm_client_setup(&mdrv->kms->base, NULL); return 0; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index e5eb5d672bcd..1e7b1fcb2848 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> @@ -58,6 +59,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) static const struct drm_driver komeda_kms_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &komeda_cma_fops, .name = "komeda", .desc = "Arm Komeda Display Processor driver", diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 32be9e370049..cd4389809d42 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -9,6 +9,7 @@ * ARM HDLCD Driver */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/clk.h> @@ -21,8 +22,8 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> @@ -228,6 +229,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver hdlcd_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "hdlcd", .desc = "ARM HDLCD Controller DRM", @@ -285,7 +287,7 @@ static int hdlcd_drm_bind(struct device *dev) */ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) { hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_aperture_remove_framebuffers(&hdlcd_driver); + aperture_remove_all_conflicting_devices(hdlcd_driver.name); } drm_mode_config_reset(drm); @@ -299,7 +301,7 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_register; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 6682131d2910..4cb25004b84f 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -562,6 +563,7 @@ static void malidp_debugfs_init(struct drm_minor *minor) static const struct drm_driver malidp_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = malidp_debugfs_init, #endif @@ -852,7 +854,7 @@ static int malidp_bind(struct device *dev) if (ret) goto register_fail; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 
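/*
 * malidp shows the whole conversion recipe used across this series:
 * select DRM_CLIENT_SELECTION in Kconfig, add DRM_FBDEV_DMA_DRIVER_OPS
 * (or the shmem variant) to struct drm_driver, and replace the old
 * drm_fbdev_*_setup(drm, bpp) call with drm_client_setup(drm, NULL).
 * NULL means "use the device's preferred format"; drivers that need a
 * specific format pass it instead, e.g. atmel-hlcdc further below maps
 * its old 24 bpp argument to
 *
 *	drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB888);
 */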
0; diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index e5597d7c9ae1..b22c891a670b 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig @@ -2,6 +2,7 @@ config DRM_ARMADA tristate "DRM support for Marvell Armada SoCs" depends on DRM && HAVE_CLK && ARM && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION help diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index c303e8c7ff6c..3c0ff221a43b 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -16,6 +16,8 @@ struct armada_crtc; struct armada_gem_object; struct clk; struct drm_display_mode; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; static inline void armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr) @@ -74,10 +76,13 @@ struct armada_private { #define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm) #if defined(CONFIG_DRM_FBDEV_EMULATION) -void armada_fbdev_setup(struct drm_device *dev); +int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes); +#define ARMADA_FBDEV_DRIVER_OPS \ + .fbdev_probe = armada_fbdev_driver_fbdev_probe #else -static inline void armada_fbdev_setup(struct drm_device *dev) -{ } +#define ARMADA_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif int armada_overlay_plane_create(struct drm_device *, unsigned long); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index e51ecc4f7ef4..5c26f0409478 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -3,6 +3,7 @@ * Copyright (C) 2012 Russell King */ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -10,8 +11,8 @@ #include <linux/of_graph.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -39,6 +40,7 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops); static const struct drm_driver armada_drm_driver = { .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, + ARMADA_FBDEV_DRIVER_OPS, .major = 1, .minor = 0, .name = "armada-drm", @@ -91,7 +93,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(&armada_drm_driver); + ret = aperture_remove_all_conflicting_devices(armada_drm_driver.name); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", __func__, ret); @@ -137,7 +139,7 @@ static int armada_drm_bind(struct device *dev) armada_drm_debugfs_init(priv->drm.primary); #endif - armada_fbdev_setup(&priv->drm); + drm_client_setup(&priv->drm, NULL); return 0; diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index d223176912b6..6ee7ce04ee71 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -39,8 +39,10 @@ static const struct fb_ops armada_fb_ops = { .fb_destroy = armada_fbdev_fb_destroy, }; -static int armada_fbdev_create(struct drm_fb_helper *fbh, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs armada_fbdev_helper_funcs; + +int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fbh->dev; 
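/*
 * This is the old armada_fbdev_create() promoted to the driver's
 * .fbdev_probe hook. The contract differs from the deleted .fb_probe
 * path below: the probe fills in fbh->funcs and fbh->fb itself and
 * returns 0 on success, instead of returning 1 to signal "created".
 */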
struct drm_mode_fb_cmd2 mode; @@ -98,6 +100,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh, info->fix.smem_len = obj->obj.size; info->screen_size = obj->obj.size; info->screen_base = ptr; + fbh->funcs = &armada_fbdev_helper_funcs; fbh->fb = &dfb->fb; drm_fb_helper_fill_info(info, fbh, sizes); @@ -112,109 +115,3 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh, dfb->fb.funcs->destroy(&dfb->fb); return ret; } - -static int armada_fb_probe(struct drm_fb_helper *fbh, - struct drm_fb_helper_surface_size *sizes) -{ - int ret = 0; - - if (!fbh->fb) { - ret = armada_fbdev_create(fbh, sizes); - if (ret == 0) - ret = 1; - } - return ret; -} - -static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { - .fb_probe = armada_fb_probe, -}; - -/* - * Fbdev client and struct drm_client_funcs - */ - -static void armada_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fbh = drm_fb_helper_from_client(client); - - if (fbh->info) { - drm_fb_helper_unregister_info(fbh); - } else { - drm_client_release(&fbh->client); - drm_fb_helper_unprepare(fbh); - kfree(fbh); - } -} - -static int armada_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int armada_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fbh = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fbh); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fbh); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fbh); -err_drm_err: - drm_err(dev, "armada: Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs armada_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = armada_fbdev_client_unregister, - .restore = armada_fbdev_client_restore, - .hotplug = armada_fbdev_client_hotplug, -}; - -void armada_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *fbh; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fbh = kzalloc(sizeof(*fbh), GFP_KERNEL); - if (!fbh) - return; - drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs); - - ret = drm_client_init(dev, &fbh->client, "fbdev", &armada_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fbh->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fbh); - kfree(fbh); - return; -} diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 8137c39b057b..6e68f20aac21 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -4,6 +4,7 @@ config DRM_ASPEED_GFX depends on DRM && OF depends on (COMPILE_TEST || ARCH_ASPEED) depends on MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index a7a6b70220eb..109023815fa2 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -14,6 +14,7 @@ #include <linux/reset.h> 
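/*
 * aspeed follows the same DMA-helper recipe; drivers pick the fbdev
 * ops matching their GEM backend (DRM_FBDEV_DMA_DRIVER_OPS here,
 * DRM_FBDEV_SHMEM_DRIVER_OPS for the shmem-based ast further below).
 */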
#include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_device.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -247,6 +248,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "aspeed-gfx-drm", .desc = "ASPEED GFX DRM", @@ -339,7 +341,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(&priv->drm, 32); + drm_client_setup(&priv->drm, NULL); return 0; err_unload: diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index 563fa7a3b546..da0663542e8a 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -2,6 +2,7 @@ config DRM_AST tristate "AST server chips" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select I2C diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 00b364f9a71e..0e282b7b167c 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -149,28 +149,22 @@ int ast_dp_launch(struct ast_device *ast) return 0; } -static bool ast_dp_power_is_on(struct ast_device *ast) +static bool ast_dp_get_phy_sleep(struct ast_device *ast) { - u8 vgacre3; + u8 vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3); - vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3); - - return !(vgacre3 & AST_DP_PHY_SLEEP); + return (vgacre3 & AST_IO_VGACRE3_DP_PHY_SLEEP); } -static void ast_dp_power_on_off(struct drm_device *dev, bool on) +static void ast_dp_set_phy_sleep(struct ast_device *ast, bool sleep) { - struct ast_device *ast = to_ast_device(dev); - // Read and Turn off DP PHY sleep - u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, AST_DP_VIDEO_ENABLE); - - // Turn on DP PHY sleep - if (!on) - bE3 |= AST_DP_PHY_SLEEP; + u8 vgacre3 = 0x00; - // DP Power on/off - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3); + if (sleep) + vgacre3 |= AST_IO_VGACRE3_DP_PHY_SLEEP; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_PHY_SLEEP, + vgacre3); msleep(50); } @@ -192,23 +186,39 @@ static void ast_dp_link_training(struct ast_device *ast) drm_err(dev, "Link training failed\n"); } -static void ast_dp_set_on_off(struct drm_device *dev, bool on) +static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled) { - struct ast_device *ast = to_ast_device(dev); - u8 video_on_off = on; - u32 i = 0; - - // Video On/Off - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on); - - video_on_off <<= 4; - while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, - ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) { - // wait 1 ms - mdelay(1); - if (++i > 200) - break; + u8 vgacrdf_test = 0x00; + u8 vgacrdf; + unsigned int i; + + if (enabled) + vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE; + + for (i = 0; i < 200; ++i) { + if (i) + mdelay(1); + vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf, + AST_IO_VGACRDF_DP_VIDEO_ENABLE); + if (vgacrdf == vgacrdf_test) + return true; } + + return false; +} + +static void ast_dp_set_enable(struct ast_device *ast, bool enabled) +{ + struct drm_device *dev = &ast->base; + u8 vgacre3 = 0x00; + + if (enabled) + vgacre3 |= AST_IO_VGACRE3_DP_VIDEO_ENABLE; + + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_VIDEO_ENABLE, + vgacre3); + + 
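/*
 * __ast_dp_wait_enable() above polls the mirrored video-enable state
 * in VGACRDF for up to 200 ms (200 x mdelay(1)) until it matches the
 * requested state; the WARN below fires if it never does.
 */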
drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled)); } static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode) @@ -317,26 +327,25 @@ static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; - struct ast_device *ast = to_ast_device(dev); + struct ast_device *ast = to_ast_device(encoder->dev); struct ast_connector *ast_connector = &ast->output.astdp.connector; if (ast_connector->physical_status == connector_status_connected) { - ast_dp_power_on_off(dev, AST_DP_POWER_ON); + ast_dp_set_phy_sleep(ast, false); ast_dp_link_training(ast); ast_wait_for_vretrace(ast); - ast_dp_set_on_off(dev, 1); + ast_dp_set_enable(ast, true); } } static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_dp_set_on_off(dev, 0); - ast_dp_power_on_off(dev, AST_DP_POWER_OFF); + ast_dp_set_enable(ast, false); + ast_dp_set_phy_sleep(ast, true); } static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = { @@ -383,22 +392,21 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector bool force) { struct ast_connector *ast_connector = to_ast_connector(connector); - struct drm_device *dev = connector->dev; struct ast_device *ast = to_ast_device(connector->dev); enum drm_connector_status status = connector_status_disconnected; - bool power_is_on; + bool phy_sleep; mutex_lock(&ast->modeset_lock); - power_is_on = ast_dp_power_is_on(ast); - if (!power_is_on) - ast_dp_power_on_off(dev, true); + phy_sleep = ast_dp_get_phy_sleep(ast); + if (phy_sleep) + ast_dp_set_phy_sleep(ast, false); if (ast_astdp_is_connected(ast)) status = connector_status_connected; - if (!power_is_on && status == connector_status_disconnected) - ast_dp_power_on_off(dev, false); + if (phy_sleep && status == connector_status_disconnected) + ast_dp_set_phy_sleep(ast, true); mutex_unlock(&ast->modeset_lock); @@ -414,6 +422,10 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs .detect_ctx = ast_astdp_connector_helper_detect_ctx, }; +/* + * Output + */ + static const struct drm_connector_funcs ast_astdp_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, @@ -422,34 +434,18 @@ static const struct drm_connector_funcs ast_astdp_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} - int ast_astdp_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.astdp.encoder; - struct ast_connector *ast_connector = &ast->output.astdp.connector; - struct drm_connector *connector = &ast_connector->base; + struct drm_encoder *encoder; + struct 
ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* encoder */ + + encoder = &ast->output.astdp.encoder; ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret) @@ -458,9 +454,20 @@ int ast_astdp_output_init(struct ast_device *ast) encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_astdp_connector_init(dev, connector); + /* connector */ + + ast_connector = &ast->output.astdp.connector; + connector = &ast_connector->base; + ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); if (ret) return ret; + drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index e4c636f45082..9e19d8c17730 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -21,9 +21,9 @@ static void ast_release_firmware(void *data) ast->dp501_fw = NULL; } -static int ast_load_dp501_microcode(struct drm_device *dev) +static int ast_load_dp501_microcode(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; int ret; ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); @@ -109,10 +109,10 @@ static bool wait_fw_ready(struct ast_device *ast) } #endif -static bool ast_write_cmd(struct drm_device *dev, u8 data) +static bool ast_write_cmd(struct ast_device *ast, u8 data) { - struct ast_device *ast = to_ast_device(dev); int retry = 0; + if (wait_nack(ast)) { send_nack(ast); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data); @@ -131,10 +131,8 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data) return false; } -static bool ast_write_data(struct drm_device *dev, u8 data) +static bool ast_write_data(struct ast_device *ast, u8 data) { - struct ast_device *ast = to_ast_device(dev); - if (wait_nack(ast)) { send_nack(ast); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data); @@ -175,10 +173,10 @@ static void clear_cmd(struct ast_device *ast) } #endif -static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +static void ast_set_dp501_video_output(struct ast_device *ast, u8 mode) { - ast_write_cmd(dev, 0x40); - ast_write_data(dev, mode); + ast_write_cmd(ast, 0x40); + ast_write_data(ast, mode); msleep(10); } @@ -188,9 +186,8 @@ static u32 get_fw_base(struct ast_device *ast) return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; } -bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) +bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size) { - struct ast_device *ast = to_ast_device(dev); u32 i, data; u32 boot_address; @@ -207,9 +204,8 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) return false; } -static bool ast_launch_m68k(struct drm_device *dev) +static bool ast_launch_m68k(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u32 i, data, len = 0; u32 boot_address; u8 *fw_addr = NULL; @@ -226,7 +222,7 @@ static bool ast_launch_m68k(struct drm_device *dev) len = 32*1024; } else { if (!ast->dp501_fw && - ast_load_dp501_microcode(dev) < 0) + ast_load_dp501_microcode(ast) < 0) return false; fw_addr = (u8 *)ast->dp501_fw->data; @@ -348,9 +344,8 @@ static int 
ast_dp512_read_edid_block(void *data, u8 *buf, unsigned int block, si return true; } -static bool ast_init_dvo(struct drm_device *dev) +static bool ast_init_dvo(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 jreg; u32 data; ast_write32(ast, 0xf004, 0x1e6e0000); @@ -421,9 +416,8 @@ static bool ast_init_dvo(struct drm_device *dev) } -static void ast_init_analog(struct drm_device *dev) +static void ast_init_analog(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u32 data; /* @@ -448,28 +442,28 @@ static void ast_init_analog(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x00); } -void ast_init_3rdtx(struct drm_device *dev) +void ast_init_3rdtx(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); - u8 jreg; + u8 vgacrd1; if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) { - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); - switch (jreg & 0x0e) { - case 0x04: - ast_init_dvo(dev); + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); + switch (vgacrd1) { + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + ast_init_dvo(ast); break; - case 0x08: - ast_launch_m68k(dev); + case AST_IO_VGACRD1_TX_DP501_VBIOS: + ast_launch_m68k(ast); break; - case 0x0c: - ast_init_dvo(dev); + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + ast_init_dvo(ast); break; default: - if (ast->tx_chip_types & BIT(AST_TX_SIL164)) - ast_init_dvo(dev); + if (ast->tx_chip == AST_TX_SIL164) + ast_init_dvo(ast); else - ast_init_analog(dev); + ast_init_analog(ast); } } } @@ -485,17 +479,17 @@ static const struct drm_encoder_funcs ast_dp501_encoder_funcs = { static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_set_dp501_video_output(dev, 1); + ast_set_dp501_video_output(ast, 1); } static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_device *dev = encoder->dev; + struct ast_device *ast = to_ast_device(encoder->dev); - ast_set_dp501_video_output(dev, 0); + ast_set_dp501_video_output(ast, 0); } static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = { @@ -567,34 +561,22 @@ static const struct drm_connector_funcs ast_dp501_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector) -{ - int ret; - - ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) - return ret; - - drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - - return 0; -} +/* + * Output + */ int ast_dp501_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.dp501.encoder; - struct ast_connector *ast_connector = &ast->output.dp501.connector; - struct drm_connector *connector = &ast_connector->base; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* encoder */ + + encoder = &ast->output.dp501.encoder; ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs, 
DRM_MODE_ENCODER_TMDS, NULL); if (ret) @@ -603,9 +585,20 @@ int ast_dp501_output_init(struct ast_device *ast) encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_dp501_connector_init(dev, connector); + /* connector */ + + ast_connector = &ast->output.dp501.connector; + connector = &ast_connector->base; + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); if (ret) return ret; + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 3a908bb015fe..4afe4be072ef 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -26,12 +26,13 @@ * Authors: Dave Airlie <airlied@redhat.com> */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_gem_shmem_helper.h> @@ -64,7 +65,8 @@ static const struct drm_driver ast_driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - DRM_GEM_SHMEM_DRIVER_OPS + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* @@ -279,7 +281,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *drm; bool need_post = false; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, ast_driver.name); if (ret) return ret; @@ -360,7 +362,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - drm_fbdev_shmem_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; } @@ -396,7 +398,7 @@ static int ast_drm_thaw(struct drm_device *dev) ast_enable_vga(ast->ioregs); ast_open_key(ast->ioregs); ast_enable_mmio(dev->dev, ast->ioregs); - ast_post_gpu(dev); + ast_post_gpu(ast); return drm_mode_config_helper_resume(dev); } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 91fe07cf7b07..21ce3769bf0d 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -91,11 +91,6 @@ enum ast_tx_chip { AST_TX_ASTDP, }; -#define AST_TX_NONE_BIT BIT(AST_TX_NONE) -#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) -#define AST_TX_DP501_BIT BIT(AST_TX_DP501) -#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) - enum ast_config_mode { ast_use_p2a, ast_use_dt, @@ -187,10 +182,12 @@ struct ast_device { struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ + enum ast_tx_chip tx_chip; + struct ast_plane primary_plane; struct ast_plane cursor_plane; struct drm_crtc crtc; - struct { + union { struct { struct drm_encoder encoder; struct ast_connector connector; @@ -211,7 +208,6 @@ struct ast_device { bool support_wide_screen; - unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; @@ -407,9 +403,6 @@ int ast_mode_config_init(struct ast_device *ast); #define AST_DP501_LINKRATE 0xf014 #define AST_DP501_EDID_DATA 0xf020 -#define AST_DP_POWER_ON true -#define AST_DP_POWER_OFF false - /* * ASTDP resoultion table: * EX: 
ASTDP_A_B_C: @@ -453,7 +446,7 @@ int ast_mode_config_init(struct ast_device *ast); int ast_mm_init(struct ast_device *ast); /* ast post */ -void ast_post_gpu(struct drm_device *dev); +void ast_post_gpu(struct ast_device *ast); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); void ast_patch_ahb_2500(void __iomem *regs); @@ -462,8 +455,8 @@ int ast_vga_output_init(struct ast_device *ast); int ast_sil164_output_init(struct ast_device *ast); /* ast dp501 */ -bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); -void ast_init_3rdtx(struct drm_device *dev); +bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size); +void ast_init_3rdtx(struct ast_device *ast); int ast_dp501_output_init(struct ast_device *ast); /* aspeed DP */ diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d836f2a4f9f3..bc37c65305d4 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -68,11 +68,33 @@ static void ast_detect_widescreen(struct ast_device *ast) static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) { + static const char * const info_str[] = { + "analog VGA", + "Sil164 TMDS transmitter", + "DP501 DisplayPort transmitter", + "ASPEED DisplayPort transmitter", + }; + struct drm_device *dev = &ast->base; - u8 jreg; + u8 jreg, vgacrd1; + + /* + * Several of the listed TX chips are not explicitly supported + * by the ast driver. If these exist in real-world devices, they + * are most likely reported as VGA or SIL164 outputs. We warn here + * to get bug reports for these devices. If none come in for some + * time, we can begin to fail device probing on these values. + */ + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS, + "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS, + "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS, + "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_types |= AST_TX_NONE_BIT; + ast->tx_chip = AST_TX_NONE; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -85,7 +107,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) if (!need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_types = AST_TX_SIL164_BIT; + ast->tx_chip = AST_TX_SIL164; } if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) { @@ -94,49 +116,42 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) * the SOC scratch register #1 bits 11:8 (interestingly marked * as "reserved" in the spec) */ - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); + jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); switch (jreg) { - case 0x04: - ast->tx_chip_types = AST_TX_SIL164_BIT; + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + ast->tx_chip = AST_TX_SIL164; break; - case 0x08: + case AST_IO_VGACRD1_TX_DP501_VBIOS: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); if (ast->dp501_fw_addr) { /* backup firmware */ - if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) { + if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { drmm_kfree(dev, ast->dp501_fw_addr); ast->dp501_fw_addr = NULL; } } fallthrough; 
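/*
 * 0x0c (AST_IO_VGACRD1_TX_FW_EMBEDDED_FW) marks a DP501 with embedded
 * firmware, a "special case of DP501" per the new ast_reg.h comment,
 * which is why the DP501_VBIOS case above deliberately falls through:
 * both paths end with ast->tx_chip = AST_TX_DP501.
 */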
- case 0x0c: - ast->tx_chip_types = AST_TX_DP501_BIT; + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + ast->tx_chip = AST_TX_DP501; } } else if (IS_AST_GEN7(ast)) { - if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) == - ASTDP_DPMCU_TX) { + if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) == + AST_IO_VGACRD1_TX_ASTDP) { int ret = ast_dp_launch(ast); if (!ret) - ast->tx_chip_types = AST_TX_ASTDP_BIT; + ast->tx_chip = AST_TX_ASTDP; } } - /* Print stuff for diagnostic purposes */ - if (ast->tx_chip_types & AST_TX_NONE_BIT) - drm_info(dev, "Using analog VGA\n"); - if (ast->tx_chip_types & AST_TX_SIL164_BIT) - drm_info(dev, "Using Sil164 TMDS transmitter\n"); - if (ast->tx_chip_types & AST_TX_DP501_BIT) - drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) - drm_info(dev, "Using ASPEED DisplayPort transmitter\n"); + drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); } -static int ast_get_dram_info(struct drm_device *dev) +static int ast_get_dram_info(struct ast_device *ast) { + struct drm_device *dev = &ast->base; struct device_node *np = dev->dev->of_node; - struct ast_device *ast = to_ast_device(dev); uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; uint32_t denum, num, div, ref_pll, dsel; @@ -278,7 +293,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev, ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); - ret = ast_get_dram_info(dev); + ret = ast_get_dram_info(ast); if (ret) return ERR_PTR(ret); @@ -286,7 +301,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev, ast->mclk, ast->dram_type, ast->dram_bus_width); if (need_post) - ast_post_gpu(dev); + ast_post_gpu(ast); ret = ast_mm_init(ast); if (ret) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index ed496fb32bf3..9d5321c81e68 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1287,9 +1287,9 @@ static const struct drm_crtc_funcs ast_crtc_funcs = { .atomic_destroy_state = ast_crtc_atomic_destroy_state, }; -static int ast_crtc_init(struct drm_device *dev) +static int ast_crtc_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; int ret; @@ -1396,28 +1396,26 @@ int ast_mode_config_init(struct ast_device *ast) if (ret) return ret; - ast_crtc_init(dev); + ret = ast_crtc_init(ast); + if (ret) + return ret; - if (ast->tx_chip_types & AST_TX_NONE_BIT) { + switch (ast->tx_chip) { + case AST_TX_NONE: ret = ast_vga_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_SIL164_BIT) { + break; + case AST_TX_SIL164: ret = ast_sil164_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_DP501_BIT) { + break; + case AST_TX_DP501: ret = ast_dp501_output_init(ast); - if (ret) - return ret; - } - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + break; + case AST_TX_ASTDP: ret = ast_astdp_output_init(ast); - if (ret) - return ret; + break; } + if (ret) + return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 65755798ab94..364030f97571 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -34,16 +34,14 @@ #include "ast_dram_tables.h" #include "ast_drv.h" -static void ast_post_chip_2300(struct drm_device *dev); -static void ast_post_chip_2500(struct drm_device *dev); +static void ast_post_chip_2300(struct ast_device *ast); 
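/*
 * The drm_device -> ast_device parameter change repeated throughout
 * ast_post.c (and ast_dp501.c above) drops the to_ast_device() lookup
 * at the top of each helper; callers already hold the ast_device, and
 * the few helpers that still need the DRM device derive it back via
 * ast->base.
 */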
+static void ast_post_chip_2500(struct ast_device *ast); static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; -static void -ast_set_def_ext_reg(struct drm_device *dev) +static void ast_set_def_ext_reg(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 i, index, reg; const u8 *ext_reg_info; @@ -252,9 +250,8 @@ cbr_start: -static void ast_init_dram_reg(struct drm_device *dev) +static void ast_init_dram_reg(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); u8 j; u32 data, temp, i; const struct ast_dramstruct *dram_reg_info; @@ -343,26 +340,24 @@ static void ast_init_dram_reg(struct drm_device *dev) } while ((j & 0x40) == 0); } -void ast_post_gpu(struct drm_device *dev) +void ast_post_gpu(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); - - ast_set_def_ext_reg(dev); + ast_set_def_ext_reg(ast); if (IS_AST_GEN7(ast)) { - if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + if (ast->tx_chip == AST_TX_ASTDP) ast_dp_launch(ast); } else if (ast->config_mode == ast_use_p2a) { if (IS_AST_GEN6(ast)) - ast_post_chip_2500(dev); + ast_post_chip_2500(ast); else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) - ast_post_chip_2300(dev); + ast_post_chip_2300(ast); else - ast_init_dram_reg(dev); + ast_init_dram_reg(ast); - ast_init_3rdtx(dev); + ast_init_3rdtx(ast); } else { - if (ast->tx_chip_types & AST_TX_SIL164_BIT) + if (ast->tx_chip == AST_TX_SIL164) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } @@ -1569,9 +1564,8 @@ ddr2_init_start: } -static void ast_post_chip_2300(struct drm_device *dev) +static void ast_post_chip_2300(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); struct ast2300_dram_param param; u32 temp; u8 reg; @@ -2038,9 +2032,9 @@ void ast_patch_ahb_2500(void __iomem *regs) __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ } -void ast_post_chip_2500(struct drm_device *dev) +void ast_post_chip_2500(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; u32 temp; u8 reg; diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 040961cc1a19..2aadf07d135a 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -37,28 +37,29 @@ #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) -#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5) +#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5) +/* Display Transmitter Type */ +#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1) +#define AST_IO_VGACRD1_NO_TX 0x00 +#define AST_IO_VGACRD1_TX_ITE66121_VBIOS 0x02 +#define AST_IO_VGACRD1_TX_SIL164_VBIOS 0x04 +#define AST_IO_VGACRD1_TX_CH7003_VBIOS 0x06 +#define AST_IO_VGACRD1_TX_DP501_VBIOS 0x08 +#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a +#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */ +#define AST_IO_VGACRD1_TX_ASTDP 0x0e + #define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0) #define AST_IO_VGACRDC_LINK_SUCCESS BIT(0) #define AST_IO_VGACRDF_HPD BIT(0) +#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */ +#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0) +#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4) #define AST_IO_VGACRE5_EDID_READ_DONE BIT(0) #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) -/* - * Display Transmitter Type - */ - -#define TX_TYPE_MASK GENMASK(3, 1) 
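/*
 * The unprefixed transmitter defines removed here map 1:1 onto the new
 * AST_IO_VGACRD1_* names added above, with identical values: e.g.
 * TX_TYPE_MASK becomes AST_IO_VGACRD1_TX_TYPE_MASK, SI164_VBIOS_TX
 * (2 << 1 == 0x04) becomes AST_IO_VGACRD1_TX_SIL164_VBIOS, and
 * ASTDP_DPMCU_TX (7 << 1 == 0x0e) becomes AST_IO_VGACRD1_TX_ASTDP.
 */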
-#define NO_TX (0 << 1) -#define ITE66121_VBIOS_TX (1 << 1) -#define SI164_VBIOS_TX (2 << 1) -#define CH7003_VBIOS_TX (3 << 1) -#define DP501_VBIOS_TX (4 << 1) -#define ANX9807_VBIOS_TX (5 << 1) -#define TX_FW_EMBEDDED_FW_TX (6 << 1) -#define ASTDP_DPMCU_TX (7 << 1) #define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) //#define AST_VRAM_INIT_BY_BMC BIT(7) @@ -68,18 +69,6 @@ * AST DisplayPort */ -/* Define for Soc scratched reg used on ASTDP */ -#define AST_DP_PHY_SLEEP BIT(4) -#define AST_DP_VIDEO_ENABLE BIT(0) - -/* - * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE - * Precondition: A. ~AST_DP_PHY_SLEEP && - * B. DP_HPD && - * C. DP_LINK_SUCCESS - */ -#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4) - /* * ASTDP setmode registers: * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c index c231389936bd..be01254dd48a 100644 --- a/drivers/gpu/drm/ast/ast_sil164.c +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -73,52 +73,49 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector) +/* + * Output + */ + +int ast_sil164_output_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; struct i2c_adapter *ddc; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* DDC */ + ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + if (IS_ERR(ddc)) + return PTR_ERR(ddc); + + /* encoder */ + + encoder = &ast->output.sil164.encoder; + ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) return ret; - } + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* connector */ + ast_connector = &ast->output.sil164.connector; + connector = &ast_connector->base; ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, DRM_MODE_CONNECTOR_DVII, ddc); if (ret) return ret; - drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - return 0; -} - -int ast_sil164_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.sil164.encoder; - struct ast_connector *ast_connector = &ast->output.sil164.connector; - struct drm_connector *connector = &ast_connector->base; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_sil164_connector_init(dev, connector); - if (ret) - return ret; ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c index dd389a0a8f4a..abe0fff8485c 100644 --- a/drivers/gpu/drm/ast/ast_vga.c +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -73,52 +73,49 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int 
ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector) +/* + * Output + */ + +int ast_vga_output_init(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; struct i2c_adapter *ddc; + struct drm_encoder *encoder; + struct ast_connector *ast_connector; + struct drm_connector *connector; int ret; + /* DDC */ + ddc = ast_ddc_create(ast); - if (IS_ERR(ddc)) { - ret = PTR_ERR(ddc); - drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + if (IS_ERR(ddc)) + return PTR_ERR(ddc); + + /* encoder */ + + encoder = &ast->output.vga.encoder; + ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) return ret; - } + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* connector */ + ast_connector = &ast->output.vga.connector; + connector = &ast_connector->base; ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA, ddc); if (ret) return ret; - drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - return 0; -} - -int ast_vga_output_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_crtc *crtc = &ast->crtc; - struct drm_encoder *encoder = &ast->output.vga.encoder; - struct ast_connector *ast_connector = &ast->output.vga.connector; - struct drm_connector *connector = &ast_connector->base; - int ret; - - ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - if (ret) - return ret; - encoder->possible_crtcs = drm_crtc_mask(crtc); - - ret = ast_vga_connector_init(dev, connector); - if (ret) - return ret; ast_connector->physical_status = connector->status; ret = drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 945f3aa7bb24..f8b9c91907d8 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -2,6 +2,7 @@ config DRM_ATMEL_HLCDC tristate "DRM Support for ATMEL HLCDC Display Controller" depends on DRM && OF && COMMON_CLK && ((MFD_ATMEL_HLCDC && ARM) || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 9ce429f889ca..792dcc19e8e7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -18,8 +18,10 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -840,6 +842,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "atmel-hlcdc", .desc = "Atmel HLCD Controller DRM", @@ -865,7 +868,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(ddev, 24); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB888); return 0; diff --git 
a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3eb955333c80..6b4664d91faa 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -90,6 +90,17 @@ config DRM_FSL_LDB help Support for i.MX8MP DPI-to-LVDS on-SoC encoder. +config DRM_ITE_IT6263 + tristate "ITE IT6263 LVDS/HDMI bridge" + depends on OF + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR + select DRM_KMS_HELPER + select REGMAP_I2C + help + ITE IT6263 LVDS to HDMI bridge chip driver. + config DRM_ITE_IT6505 tristate "ITE IT6505 DisplayPort bridge" depends on OF @@ -140,6 +151,8 @@ config DRM_LONTIUM_LT9611 select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_MIPI_DSI + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER select REGMAP_I2C help Driver for Lontium LT9611 DSI to HDMI bridge @@ -368,6 +381,13 @@ config DRM_TI_DLPC3433 It supports up to 720p resolution with 60 and 120 Hz refresh rates. +config DRM_TI_TDP158 + tristate "TI TDP158 HDMI/TMDS bridge" + depends on OF + select DRM_PANEL_BRIDGE + help + Texas Instruments TDP158 HDMI/TMDS Bridge driver + config DRM_TI_TFP410 tristate "TI TFP410 DVI/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 7df87b582dca..97304b429a53 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o +obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o @@ -32,6 +33,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o +obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index a2e9bb485c36..a2675b121fe4 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2551,6 +2551,8 @@ static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev) mutex_lock(&ctx->lock); anx7625_stop_dp_work(ctx); + if (!ctx->pdata.panel_bridge) + anx7625_remove_edid(ctx); anx7625_power_standby(ctx); mutex_unlock(&ctx->lock); diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index 295e9d031e2d..015983c015e5 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -121,6 +121,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, data->bridge.funcs = &drm_aux_bridge_funcs; data->bridge.of_node = data->dev->of_node; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + return devm_drm_bridge_add(data->dev, &data->bridge); } diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 6886db2d9e00..48f297c78ee6 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -180,6 +180,10 @@ static int 
drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, data->bridge.ops = DRM_BRIDGE_OP_HPD; data->bridge.type = id->driver_data; + /* passthrough data, allow everything */ + data->bridge.interlace_allowed = true; + data->bridge.ycbcr_420_allowed = true; + auxiliary_set_drvdata(auxdev, data); return devm_drm_bridge_add(data->dev, &data->bridge); diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index ab8e00baf3f1..aab9ce7be94c 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -270,6 +270,10 @@ static int display_connector_probe(struct platform_device *pdev) /* All the supported connector types support interlaced modes. */ conn->bridge.interlace_allowed = true; + if (type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort) + conn->bridge.ycbcr_420_allowed = true; + /* Get the optional connector label. */ of_property_read_string(pdev->dev.of_node, "label", &label); diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 8dd89efa8ea7..9a480c6abb85 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -3,6 +3,16 @@ if ARCH_MXC || COMPILE_TEST config DRM_IMX_LDB_HELPER tristate +config DRM_IMX_LEGACY_BRIDGE + tristate + depends on DRM_IMX + help + This is a DRM bridge implementation for the i.MX IPUv3 DRM driver + that uses of_get_drm_display_mode to acquire the display mode. + + Newer designs should not use this bridge and should use a proper + panel driver instead. + config DRM_IMX8MP_DW_HDMI_BRIDGE tristate "Freescale i.MX8MP HDMI-TX bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index edb0a7b71b30..dd5d48584806 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o +obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c new file mode 100644 index 000000000000..3ebf0b9866de --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Freescale i.MX drm driver + * + * bridge driver for legacy DT bindings, utilizing display-timings node + */ + +#include <drm/drm_bridge.h> +#include <drm/drm_modes.h> +#include <drm/drm_probe_helper.h> +#include <drm/bridge/imx.h> + +#include <video/of_display_timing.h> +#include <video/of_videomode.h> + +struct imx_legacy_bridge { + struct drm_bridge base; + + struct drm_display_mode mode; + u32 bus_flags; +}; + +#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base) + +static int imx_legacy_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + return 0; +} + +static int imx_legacy_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct imx_legacy_bridge *imx_bridge = to_imx_legacy_bridge(bridge); + int ret; + + ret = drm_connector_helper_get_modes_fixed(connector, &imx_bridge->mode); + if (ret) + return ret; + + connector->display_info.bus_flags = imx_bridge->bus_flags; + + return 0; +} +
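The legacy bridge above parses the display-timings node once at creation time and then serves that single mode to any connector through drm_connector_helper_get_modes_fixed(). A minimal sketch of how a driver might consume it; the encoder variable and the DPI connector type are illustrative assumptions, not part of this patch:

/* Hypothetical consumer of devm_imx_drm_legacy_bridge(); assumes a DPI output. */
#include <linux/err.h>
#include <drm/bridge/imx.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

static int example_attach_legacy_output(struct device *dev,
					struct drm_encoder *encoder,
					struct device_node *np)
{
	struct drm_bridge *bridge;

	/* Builds a bridge reporting the one mode parsed from @np. */
	bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/*
	 * The bridge rejects attachment without
	 * DRM_BRIDGE_ATTACH_NO_CONNECTOR, so the caller creates the
	 * connector itself (e.g. via drm_bridge_connector_init()).
	 */
	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}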
+static const struct drm_bridge_funcs imx_legacy_bridge_funcs = { + .attach = imx_legacy_bridge_attach, + .get_modes = imx_legacy_bridge_get_modes, +}; + +struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, + struct device_node *np, + int type) +{ + struct imx_legacy_bridge *imx_bridge; + int ret; + + imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL); + if (!imx_bridge) + return ERR_PTR(-ENOMEM); + + ret = of_get_drm_display_mode(np, + &imx_bridge->mode, + &imx_bridge->bus_flags, + OF_USE_NATIVE_MODE); + if (ret) + return ERR_PTR(ret); + + imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER; + + imx_bridge->base.funcs = &imx_legacy_bridge_funcs; + imx_bridge->base.of_node = np; + imx_bridge->base.ops = DRM_BRIDGE_OP_MODES; + imx_bridge->base.type = type; + + ret = devm_drm_bridge_add(dev, &imx_bridge->base); + if (ret) + return ERR_PTR(ret); + + return &imx_bridge->base; +} +EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Freescale i.MX DRM bridge driver for legacy DT bindings"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 13bc570c5473..8fcc6d18f4ab 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -23,6 +23,7 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data, const struct drm_display_mode *mode) { struct imx8mp_hdmi *hdmi = (struct imx8mp_hdmi *)data; + long round_rate; if (mode->clock < 13500) return MODE_CLOCK_LOW; @@ -30,8 +31,14 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data, if (mode->clock > 297000) return MODE_CLOCK_HIGH; - if (clk_round_rate(hdmi->pixclk, mode->clock * 1000) != - mode->clock * 1000) + round_rate = clk_round_rate(hdmi->pixclk, mode->clock * 1000); + /* imx8mp's pixel clock generator (fsl-samsung-hdmi) cannot generate + * all possible frequencies, so allow some tolerance to support more + * modes. 
+ * Allow the 0.5% difference permitted by various standards (VESA, + * CEA-861): 0.5% = 5/1000, and since mode->clock is in kHz (1/1000 of + * the rate in Hz), the allowed deviation is mode->clock * 5 Hz. + */ + if (abs(round_rate - mode->clock * 1000) > mode->clock * 5) return MODE_CLOCK_RANGE; /* We don't support double-clocked and Interlaced modes */ @@ -111,12 +118,12 @@ static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) dw_hdmi_remove(hdmi->dw_hdmi); } -static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev) +static int imx8mp_dw_hdmi_pm_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev) +static int imx8mp_dw_hdmi_pm_resume(struct device *dev) { struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); @@ -126,8 +133,7 @@ static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev) } static const struct dev_pm_ops imx8mp_dw_hdmi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend, - imx8mp_dw_hdmi_pm_resume) + SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend, imx8mp_dw_hdmi_pm_resume) }; static const struct of_device_id imx8mp_dw_hdmi_of_table[] = { @@ -142,7 +148,7 @@ static struct platform_driver imx8mp_dw_hdmi_platform_driver = { .driver = { .name = "imx8mp-dw-hdmi-tx", .of_match_table = imx8mp_dw_hdmi_of_table, - .pm = &imx8mp_dw_hdmi_pm_ops, + .pm = pm_ptr(&imx8mp_dw_hdmi_pm_ops), }, }; diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c index 21471a9a28b2..c879e37f5811 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c @@ -542,12 +542,12 @@ static void imx8qm_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev) +static int imx8qm_ldb_runtime_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev) +static int imx8qm_ldb_runtime_resume(struct device *dev) { struct imx8qm_ldb *imx8qm_ldb = dev_get_drvdata(dev); struct ldb *ldb = &imx8qm_ldb->base; @@ -559,8 +559,7 @@ static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qm_ldb_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend, - imx8qm_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend, imx8qm_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qm_ldb_dt_ids[] = { @@ -573,7 +572,7 @@ static struct platform_driver imx8qm_ldb_driver = { .probe = imx8qm_ldb_probe, .remove_new = imx8qm_ldb_remove, .driver = { - .pm = &imx8qm_ldb_pm_ops, + .pm = pm_ptr(&imx8qm_ldb_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qm_ldb_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 7984da9c0a35..b33011f397f0 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -678,12 +678,12 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev) +static int imx8qxp_ldb_runtime_suspend(struct device *dev) { return 0; } -static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev) +static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); struct ldb *ldb = &imx8qxp_ldb->base; @@ -695,8 +695,7 @@ static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops 
imx8qxp_ldb_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, - imx8qxp_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { @@ -709,7 +708,7 @@ static struct platform_driver imx8qxp_ldb_driver = { .probe = imx8qxp_ldb_probe, .remove_new = imx8qxp_ldb_remove, .driver = { - .pm = &imx8qxp_ldb_pm_ops, + .pm = pm_ptr(&imx8qxp_ldb_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qxp_ldb_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index e6dbbdc87ce2..ce43e4069e21 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -371,7 +371,7 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev) +static int imx8qxp_pc_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx8qxp_pc *pc = platform_get_drvdata(pdev); @@ -393,7 +393,7 @@ static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev) return ret; } -static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev) +static int imx8qxp_pc_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx8qxp_pc *pc = platform_get_drvdata(pdev); @@ -415,8 +415,7 @@ static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_pc_pm_ops = { - SET_RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend, - imx8qxp_pc_runtime_resume, NULL) + RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend, imx8qxp_pc_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_pc_dt_ids[] = { @@ -430,7 +429,7 @@ static struct platform_driver imx8qxp_pc_bridge_driver = { .probe = imx8qxp_pc_bridge_probe, .remove_new = imx8qxp_pc_bridge_remove, .driver = { - .pm = &imx8qxp_pc_pm_ops, + .pm = pm_ptr(&imx8qxp_pc_pm_ops), .name = DRIVER_NAME, .of_match_table = imx8qxp_pc_dt_ids, }, diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c new file mode 100644 index 000000000000..cbabd4e20d3e --- /dev/null +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -0,0 +1,898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 NXP + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> + +/* ----------------------------------------------------------------------------- + * LVDS registers + */ + +/* LVDS software reset registers */ +#define LVDS_REG_05 0x05 +#define REG_SOFT_P_RST BIT(1) + +/* LVDS system configuration registers */ +/* 0x0b */ +#define LVDS_REG_0B 0x0b +#define REG_SSC_PCLK_RF BIT(0) +#define REG_LVDS_IN_SWAP BIT(1) + +/* LVDS test 
pattern gen control registers */ +/* 0x2c */ +#define LVDS_REG_2C 0x2c +#define REG_COL_DEP GENMASK(1, 0) +#define BIT8 FIELD_PREP(REG_COL_DEP, 1) +#define OUT_MAP BIT(4) +#define JEIDA 0 +#define REG_DESSC_ENB BIT(6) +#define DMODE BIT(7) +#define DISO BIT(7) +#define SISO 0 + +#define LVDS_REG_3C 0x3c +#define LVDS_REG_3F 0x3f +#define LVDS_REG_47 0x47 +#define LVDS_REG_48 0x48 +#define LVDS_REG_4F 0x4f +#define LVDS_REG_52 0x52 + +/* ----------------------------------------------------------------------------- + * HDMI registers are separated into three banks: + * 1) HDMI register common bank: 0x00 ~ 0x2f + */ + +/* HDMI general registers */ +#define HDMI_REG_SW_RST 0x04 +#define SOFTREF_RST BIT(5) +#define SOFTA_RST BIT(4) +#define SOFTV_RST BIT(3) +#define AUD_RST BIT(2) +#define HDCP_RST BIT(0) +#define HDMI_RST_ALL (SOFTREF_RST | SOFTA_RST | SOFTV_RST | \ + AUD_RST | HDCP_RST) + +#define HDMI_REG_SYS_STATUS 0x0e +#define HPDETECT BIT(6) +#define TXVIDSTABLE BIT(4) + +#define HDMI_REG_BANK_CTRL 0x0f +#define REG_BANK_SEL BIT(0) + +/* HDMI System DDC control registers */ +#define HDMI_REG_DDC_MASTER_CTRL 0x10 +#define MASTER_SEL_HOST BIT(0) + +#define HDMI_REG_DDC_HEADER 0x11 + +#define HDMI_REG_DDC_REQOFF 0x12 +#define HDMI_REG_DDC_REQCOUNT 0x13 +#define HDMI_REG_DDC_EDIDSEG 0x14 + +#define HDMI_REG_DDC_CMD 0x15 +#define DDC_CMD_EDID_READ 0x3 +#define DDC_CMD_FIFO_CLR 0x9 + +#define HDMI_REG_DDC_STATUS 0x16 +#define DDC_DONE BIT(7) +#define DDC_NOACK BIT(5) +#define DDC_WAITBUS BIT(4) +#define DDC_ARBILOSE BIT(3) +#define DDC_ERROR (DDC_NOACK | DDC_WAITBUS | DDC_ARBILOSE) + +#define HDMI_DDC_FIFO_BYTES 32 +#define HDMI_REG_DDC_READFIFO 0x17 +#define HDMI_REG_LVDS_PORT 0x1d /* LVDS input control I2C addr */ +#define HDMI_REG_LVDS_PORT_EN 0x1e +#define LVDS_INPUT_CTRL_I2C_ADDR 0x33 + +/* ----------------------------------------------------------------------------- + * 2) HDMI register bank0: 0x30 ~ 0xff + */ + +/* HDMI AFE registers */ +#define HDMI_REG_AFE_DRV_CTRL 0x61 +#define AFE_DRV_PWD BIT(5) +#define AFE_DRV_RST BIT(4) + +#define HDMI_REG_AFE_XP_CTRL 0x62 +#define AFE_XP_GAINBIT BIT(7) +#define AFE_XP_ER0 BIT(4) +#define AFE_XP_RESETB BIT(3) + +#define HDMI_REG_AFE_ISW_CTRL 0x63 + +#define HDMI_REG_AFE_IP_CTRL 0x64 +#define AFE_IP_GAINBIT BIT(7) +#define AFE_IP_ER0 BIT(3) +#define AFE_IP_RESETB BIT(2) + +/* HDMI input data format registers */ +#define HDMI_REG_INPUT_MODE 0x70 +#define IN_RGB 0x00 + +/* HDMI general control registers */ +#define HDMI_REG_HDMI_MODE 0xc0 +#define TX_HDMI_MODE BIT(0) + +#define HDMI_REG_GCP 0xc1 +#define AVMUTE BIT(0) +#define HDMI_COLOR_DEPTH GENMASK(6, 4) +#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4) + +#define HDMI_REG_PKT_GENERAL_CTRL 0xc6 +#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd +#define ENABLE_PKT BIT(0) +#define REPEAT_PKT BIT(1) + +/* ----------------------------------------------------------------------------- + * 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers) + */ + +/* AVI packet registers */ +#define HDMI_REG_AVI_DB1 0x158 +#define HDMI_REG_AVI_DB2 0x159 +#define HDMI_REG_AVI_DB3 0x15a +#define HDMI_REG_AVI_DB4 0x15b +#define HDMI_REG_AVI_DB5 0x15c +#define HDMI_REG_AVI_CSUM 0x15d +#define HDMI_REG_AVI_DB6 0x15e +#define HDMI_REG_AVI_DB7 0x15f +#define HDMI_REG_AVI_DB8 0x160 +#define HDMI_REG_AVI_DB9 0x161 +#define HDMI_REG_AVI_DB10 0x162 +#define HDMI_REG_AVI_DB11 0x163 +#define HDMI_REG_AVI_DB12 0x164 +#define HDMI_REG_AVI_DB13 0x165 + +#define HDMI_AVI_DB_CHUNK1_SIZE (HDMI_REG_AVI_DB5 - HDMI_REG_AVI_DB1 + 
1) +#define HDMI_AVI_DB_CHUNK2_SIZE (HDMI_REG_AVI_DB13 - HDMI_REG_AVI_DB6 + 1) + +/* IT6263 data sheet Rev0.8: LVDS RX supports input clock rate up to 150MHz. */ +#define MAX_PIXEL_CLOCK_KHZ 150000 + +/* IT6263 programming guide Ver0.90: PCLK_HIGH for TMDS clock over 80MHz. */ +#define HIGH_PIXEL_CLOCK_KHZ 80000 + +/* + * IT6263 data sheet Rev0.8: HDMI TX supports link speeds of up to 2.25Gbps + * (link clock rate of 225MHz). + */ +#define MAX_HDMI_TMDS_CHAR_RATE_HZ 225000000 + +struct it6263 { + struct device *dev; + struct i2c_client *hdmi_i2c; + struct i2c_client *lvds_i2c; + struct regmap *hdmi_regmap; + struct regmap *lvds_regmap; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + int lvds_data_mapping; + bool lvds_dual_link; + bool lvds_link12_swap; +}; + +static inline struct it6263 *bridge_to_it6263(struct drm_bridge *bridge) +{ + return container_of(bridge, struct it6263, bridge); +} + +static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_BANK_CTRL: + case HDMI_REG_DDC_MASTER_CTRL: + case HDMI_REG_DDC_HEADER: + case HDMI_REG_DDC_REQOFF: + case HDMI_REG_DDC_REQCOUNT: + case HDMI_REG_DDC_EDIDSEG: + case HDMI_REG_DDC_CMD: + case HDMI_REG_LVDS_PORT: + case HDMI_REG_LVDS_PORT_EN: + case HDMI_REG_AFE_DRV_CTRL: + case HDMI_REG_AFE_XP_CTRL: + case HDMI_REG_AFE_ISW_CTRL: + case HDMI_REG_AFE_IP_CTRL: + case HDMI_REG_INPUT_MODE: + case HDMI_REG_HDMI_MODE: + case HDMI_REG_GCP: + case HDMI_REG_PKT_GENERAL_CTRL: + case HDMI_REG_AVI_INFOFRM_CTRL: + case HDMI_REG_AVI_DB1: + case HDMI_REG_AVI_DB2: + case HDMI_REG_AVI_DB3: + case HDMI_REG_AVI_DB4: + case HDMI_REG_AVI_DB5: + case HDMI_REG_AVI_CSUM: + case HDMI_REG_AVI_DB6: + case HDMI_REG_AVI_DB7: + case HDMI_REG_AVI_DB8: + case HDMI_REG_AVI_DB9: + case HDMI_REG_AVI_DB10: + case HDMI_REG_AVI_DB11: + case HDMI_REG_AVI_DB12: + case HDMI_REG_AVI_DB13: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_readable_reg(struct device *dev, unsigned int reg) +{ + if (it6263_hdmi_writeable_reg(dev, reg)) + return true; + + switch (reg) { + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static bool it6263_hdmi_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case HDMI_REG_SW_RST: + case HDMI_REG_SYS_STATUS: + case HDMI_REG_DDC_STATUS: + case HDMI_REG_DDC_READFIFO: + return true; + default: + return false; + } +} + +static const struct regmap_range_cfg it6263_hdmi_range_cfg = { + .range_min = 0x00, + .range_max = HDMI_REG_AVI_DB13, + .selector_reg = HDMI_REG_BANK_CTRL, + .selector_mask = REG_BANK_SEL, + .selector_shift = 0, + .window_start = 0x00, + .window_len = 0x100, +}; + +static const struct regmap_config it6263_hdmi_regmap_config = { + .name = "it6263-hdmi", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_hdmi_writeable_reg, + .readable_reg = it6263_hdmi_readable_reg, + .volatile_reg = it6263_hdmi_volatile_reg, + .max_register = HDMI_REG_AVI_DB13, + .ranges = &it6263_hdmi_range_cfg, + .num_ranges = 1, + .cache_type = REGCACHE_MAPLE, +}; + +static bool it6263_lvds_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LVDS_REG_05: + case LVDS_REG_0B: + case LVDS_REG_2C: + case LVDS_REG_3C: + case LVDS_REG_3F: + case LVDS_REG_47: + case LVDS_REG_48: + case LVDS_REG_4F: + case LVDS_REG_52: + return true; + default: + return false; + } +} + +static bool it6263_lvds_readable_reg(struct device 
*dev, unsigned int reg) +{ + return it6263_lvds_writeable_reg(dev, reg); +} + +static bool it6263_lvds_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == LVDS_REG_05; +} + +static const struct regmap_config it6263_lvds_regmap_config = { + .name = "it6263-lvds", + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = it6263_lvds_writeable_reg, + .readable_reg = it6263_lvds_readable_reg, + .volatile_reg = it6263_lvds_volatile_reg, + .max_register = LVDS_REG_52, + .cache_type = REGCACHE_MAPLE, +}; + +static const char * const it6263_supplies[] = { + "ivdd", "ovdd", "txavcc18", "txavcc33", "pvcc1", "pvcc2", + "avcc", "anvdd", "apvdd" +}; + +static int it6263_parse_dt(struct it6263 *it) +{ + struct device *dev = it->dev; + struct device_node *port0, *port1; + int ret = 0; + + it->lvds_data_mapping = drm_of_lvds_get_data_mapping(dev->of_node); + if (it->lvds_data_mapping < 0) { + dev_err(dev, "%pOF: invalid or missing %s DT property: %d\n", + dev->of_node, "data-mapping", it->lvds_data_mapping); + return it->lvds_data_mapping; + } + + it->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0); + if (IS_ERR(it->next_bridge)) + return dev_err_probe(dev, PTR_ERR(it->next_bridge), + "failed to get next bridge\n"); + + port0 = of_graph_get_port_by_id(dev->of_node, 0); + port1 = of_graph_get_port_by_id(dev->of_node, 1); + if (port0 && port1) { + int order; + + it->lvds_dual_link = true; + order = drm_of_lvds_get_dual_link_pixel_order_sink(port0, port1); + if (order < 0) { + dev_err(dev, + "failed to get dual link pixel order: %d\n", + order); + ret = order; + } else if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { + it->lvds_link12_swap = true; + } + } else if (port1) { + ret = -EINVAL; + dev_err(dev, "single input LVDS port1 is not supported\n"); + } else if (!port0) { + ret = -EINVAL; + dev_err(dev, "no input LVDS port\n"); + } + + of_node_put(port0); + of_node_put(port1); + + return ret; +} + +static inline void it6263_hw_reset(struct gpio_desc *reset_gpio) +{ + if (!reset_gpio) + return; + + gpiod_set_value_cansleep(reset_gpio, 0); + fsleep(1000); + gpiod_set_value_cansleep(reset_gpio, 1); + /* The chip maker says the low pulse should be at least 40ms. 
*/ + fsleep(40000); + gpiod_set_value_cansleep(reset_gpio, 0); + /* additional time for the high voltage to stabilize */ + fsleep(5000); +} + +static inline int it6263_lvds_set_i2c_addr(struct it6263 *it) +{ + int ret; + + ret = regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT, + LVDS_INPUT_CTRL_I2C_ADDR << 1); + if (ret) + return ret; + + return regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT_EN, BIT(0)); +} + +static inline void it6263_lvds_reset(struct it6263 *it) +{ + /* AFE PLL reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), 0x0); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), BIT(0)); + + /* software pixel clock domain reset */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, + REG_SOFT_P_RST); + fsleep(1000); + regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, 0x0); + fsleep(10000); +} + +static inline void it6263_lvds_set_interface(struct it6263 *it) +{ + /* color depth */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8); + /* output mapping */ + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA); + + if (it->lvds_dual_link) { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), BIT(1)); + } else { + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, SISO); + regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), 0); + } +} + +static inline void it6263_lvds_set_afe(struct it6263 *it) +{ + regmap_write(it->lvds_regmap, LVDS_REG_3C, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_3F, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_47, 0xaa); + regmap_write(it->lvds_regmap, LVDS_REG_48, 0x02); + regmap_write(it->lvds_regmap, LVDS_REG_4F, 0x11); + + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_SSC_PCLK_RF, + REG_SSC_PCLK_RF); + regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, 0x07, 0); + regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_DESSC_ENB, + REG_DESSC_ENB); +} + +static inline void it6263_lvds_sys_cfg(struct it6263 *it) +{ + regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_LVDS_IN_SWAP, + it->lvds_link12_swap ? 
REG_LVDS_IN_SWAP : 0); +} + +static inline void it6263_lvds_config(struct it6263 *it) +{ + it6263_lvds_reset(it); + it6263_lvds_set_interface(it); + it6263_lvds_set_afe(it); + it6263_lvds_sys_cfg(it); +} + +static inline void it6263_hdmi_config(struct it6263 *it) +{ + regmap_write(it->hdmi_regmap, HDMI_REG_SW_RST, HDMI_RST_ALL); + regmap_write(it->hdmi_regmap, HDMI_REG_INPUT_MODE, IN_RGB); + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, HDMI_COLOR_DEPTH, + HDMI_COLOR_DEPTH_24); +} + +static enum drm_connector_status it6263_detect(struct it6263 *it) +{ + unsigned int val; + + regmap_read(it->hdmi_regmap, HDMI_REG_SYS_STATUS, &val); + if (val & HPDETECT) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct it6263 *it = data; + struct regmap *regmap = it->hdmi_regmap; + unsigned int start = (block % 2) * EDID_LENGTH; + unsigned int segment = block >> 1; + unsigned int count, val; + int ret; + + regmap_write(regmap, HDMI_REG_DDC_MASTER_CTRL, MASTER_SEL_HOST); + regmap_write(regmap, HDMI_REG_DDC_HEADER, DDC_ADDR << 1); + regmap_write(regmap, HDMI_REG_DDC_EDIDSEG, segment); + + while (len) { + /* clear DDC FIFO */ + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_FIFO_CLR); + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, + val, val & DDC_DONE, + 2000, 10000); + if (ret) { + dev_err(it->dev, "failed to clear DDC FIFO:%d\n", ret); + return ret; + } + + count = len > HDMI_DDC_FIFO_BYTES ? HDMI_DDC_FIFO_BYTES : len; + + /* fire the read command */ + regmap_write(regmap, HDMI_REG_DDC_REQOFF, start); + regmap_write(regmap, HDMI_REG_DDC_REQCOUNT, count); + regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_EDID_READ); + + start += count; + len -= count; + + ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, val, + val & (DDC_DONE | DDC_ERROR), + 20000, 250000); + if (ret && !(val & DDC_ERROR)) { + dev_err(it->dev, "failed to read EDID:%d\n", ret); + return ret; + } + + if (val & DDC_ERROR) { + dev_err(it->dev, "DDC error\n"); + return -EIO; + } + + /* cache to buffer */ + for (; count > 0; count--) { + regmap_read(regmap, HDMI_REG_DDC_READFIFO, &val); + *(buf++) = val; + } + } + + return 0; +} + +static int it6263_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); +} + +static void +it6263_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, AVMUTE, AVMUTE); + regmap_write(it->hdmi_regmap, HDMI_REG_PKT_GENERAL_CTRL, 0); + regmap_write(it->hdmi_regmap, HDMI_REG_AFE_DRV_CTRL, + AFE_DRV_RST | AFE_DRV_PWD); +} + +static void +it6263_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct it6263 *it = bridge_to_it6263(bridge); + const struct drm_crtc_state *crtc_state; + struct regmap *regmap = it->hdmi_regmap; + const struct drm_display_mode *mode; + struct drm_connector *connector; + bool is_stable = false; + struct drm_crtc *crtc; + unsigned int val; + bool pclk_high; + int i, ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = 
drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + + regmap_write(regmap, HDMI_REG_HDMI_MODE, TX_HDMI_MODE); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); + + /* HDMI AFE setup */ + pclk_high = mode->clock > HIGH_PIXEL_CLOCK_KHZ; + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, AFE_DRV_RST); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_GAINBIT | AFE_XP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_XP_CTRL, + AFE_XP_ER0 | AFE_XP_RESETB); + regmap_write(regmap, HDMI_REG_AFE_ISW_CTRL, 0x10); + if (pclk_high) + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_GAINBIT | AFE_IP_RESETB); + else + regmap_write(regmap, HDMI_REG_AFE_IP_CTRL, + AFE_IP_ER0 | AFE_IP_RESETB); + + /* HDMI software video reset */ + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, SOFTV_RST); + fsleep(1000); + regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, 0); + + /* reconfigure LVDS and retry several times in case the video is unstable */ + for (i = 0; i < 3; i++) { + ret = regmap_read_poll_timeout(regmap, HDMI_REG_SYS_STATUS, val, + val & TXVIDSTABLE, + 20000, 500000); + if (!ret) { + is_stable = true; + break; + } + + it6263_lvds_config(it); + } + + if (!is_stable) + dev_warn(it->dev, "failed to wait for video stable\n"); + + /* HDMI AFE reset release and power up */ + regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, 0); + + regmap_write_bits(regmap, HDMI_REG_GCP, AVMUTE, 0); + + regmap_write(regmap, HDMI_REG_PKT_GENERAL_CTRL, ENABLE_PKT | REPEAT_PKT); +} + +static enum drm_mode_status +it6263_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate == 0) + return MODE_NOCLOCK; + + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int it6263_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct drm_connector *connector; + int ret; + + ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge, + flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) + return ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) + return 0; + + connector = drm_bridge_connector_init(bridge->dev, bridge->encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + dev_err(it->dev, "failed to initialize bridge connector: %d\n", + ret); + return ret; + } + + drm_connector_attach_encoder(connector, bridge->encoder); + + return 0; +} + +static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return it6263_detect(it); +} + +static const struct drm_edid * +it6263_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + return drm_edid_read_custom(connector, it6263_read_edid, it); +} + +static u32 * +it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct it6263 *it = bridge_to_it6263(bridge); + u32 *input_fmts; + + *num_input_fmts = 0; + + if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) + return NULL; + + input_fmts = 
kmalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + *num_input_fmts = 1; + + return input_fmts; +} + +static enum drm_mode_status +it6263_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + if (mode->clock > MAX_PIXEL_CLOCK_KHZ) + return MODE_CLOCK_HIGH; + + if (tmds_rate > MAX_HDMI_TMDS_CHAR_RATE_HZ) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct it6263 *it = bridge_to_it6263(bridge); + + if (type == HDMI_INFOFRAME_TYPE_AVI) + regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0); + else + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + + return 0; +} + +static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct it6263 *it = bridge_to_it6263(bridge); + struct regmap *regmap = it->hdmi_regmap; + + if (type != HDMI_INFOFRAME_TYPE_AVI) { + dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + return 0; + } + + /* write the first AVI infoframe data byte chunk(DB1-DB5) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB1, + &buffer[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_AVI_DB_CHUNK1_SIZE); + + /* write the second AVI infoframe data byte chunk(DB6-DB13) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB6, + &buffer[HDMI_INFOFRAME_HEADER_SIZE + + HDMI_AVI_DB_CHUNK1_SIZE], + HDMI_AVI_DB_CHUNK2_SIZE); + + /* write checksum */ + regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]); + + regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT); + + return 0; +} + +static const struct drm_bridge_funcs it6263_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = it6263_bridge_attach, + .mode_valid = it6263_bridge_mode_valid, + .atomic_disable = it6263_bridge_atomic_disable, + .atomic_enable = it6263_bridge_atomic_enable, + .atomic_check = it6263_bridge_atomic_check, + .detect = it6263_bridge_detect, + .edid_read = it6263_bridge_edid_read, + .atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts, + .hdmi_tmds_char_rate_valid = it6263_hdmi_tmds_char_rate_valid, + .hdmi_clear_infoframe = it6263_hdmi_clear_infoframe, + .hdmi_write_infoframe = it6263_hdmi_write_infoframe, +}; + +static int it6263_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct gpio_desc *reset_gpio; + struct it6263 *it; + int ret; + + it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL); + if (!it) + return -ENOMEM; + + it->dev = dev; + it->hdmi_i2c = client; + + it->hdmi_regmap = devm_regmap_init_i2c(client, + &it6263_hdmi_regmap_config); + if (IS_ERR(it->hdmi_regmap)) + return dev_err_probe(dev, PTR_ERR(it->hdmi_regmap), + "failed to init I2C regmap for HDMI\n"); + + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "failed to get reset gpio\n"); + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it6263_supplies), + it6263_supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get power supplies\n"); + + ret = it6263_parse_dt(it); + if (ret) + return ret; + + it6263_hw_reset(reset_gpio); + + ret = it6263_lvds_set_i2c_addr(it); + if (ret) + 
return dev_err_probe(dev, ret, "failed to set I2C addr\n"); + + it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter, + LVDS_INPUT_CTRL_I2C_ADDR); + if (IS_ERR(it->lvds_i2c)) + return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c), + "failed to allocate I2C device for LVDS\n"); + + it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c, + &it6263_lvds_regmap_config); + if (IS_ERR(it->lvds_regmap)) + return dev_err_probe(dev, PTR_ERR(it->lvds_regmap), + "failed to init I2C regmap for LVDS\n"); + + it6263_lvds_config(it); + it6263_hdmi_config(it); + + i2c_set_clientdata(client, it); + + it->bridge.funcs = &it6263_bridge_funcs; + it->bridge.of_node = dev->of_node; + /* IT6263 chip doesn't support HPD interrupt. */ + it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI; + it->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + it->bridge.vendor = "ITE"; + it->bridge.product = "IT6263"; + + return devm_drm_bridge_add(dev, &it->bridge); +} + +static const struct of_device_id it6263_of_match[] = { + { .compatible = "ite,it6263", }, + { } +}; +MODULE_DEVICE_TABLE(of, it6263_of_match); + +static const struct i2c_device_id it6263_i2c_ids[] = { + { "it6263", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids); + +static struct i2c_driver it6263_driver = { + .probe = it6263_probe, + .driver = { + .name = "it6263", + .of_match_table = it6263_of_match, + }, + .id_table = it6263_i2c_ids, +}; +module_i2c_driver(it6263_driver); + +MODULE_DESCRIPTION("ITE Tech. Inc. IT6263 LVDS/HDMI bridge"); +MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 87b8545fccc0..008d86cc562a 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2614,9 +2614,9 @@ static int it6505_poweron(struct it6505 *it6505) /* time interval between OVDD and SYSRSTN at least be 10ms */ if (pdata->gpiod_reset) { usleep_range(10000, 20000); - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); - usleep_range(1000, 2000); gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); usleep_range(25000, 35000); } @@ -2647,7 +2647,7 @@ static int it6505_poweroff(struct it6505 *it6505) disable_irq_nosync(it6505->irq); if (pdata->gpiod_reset) - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); if (pdata->pwr18) { err = regulator_disable(pdata->pwr18); @@ -3107,6 +3107,8 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev) { struct it6505 *it6505 = dev_get_drvdata(dev); + it6505_remove_edid(it6505); + return it6505_poweroff(it6505); } @@ -3133,7 +3135,7 @@ static int it6505_init_pdata(struct it6505 *it6505) return PTR_ERR(pdata->ovdd); } - pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pdata->gpiod_reset)) { dev_err(dev, "gpiod_reset gpio not found"); return PTR_ERR(pdata->gpiod_reset); @@ -3505,6 +3507,7 @@ static const struct of_device_id it6505_of_match[] = { { .compatible = "ite,it6505" }, { } }; +MODULE_DEVICE_TABLE(of, it6505_of_match); static struct i2c_driver it6505_i2c_driver = { .driver = { diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 925e42f46cd8..35ae3f0e8f51 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -770,8 +770,6 @@ void 
it66121_bridge_mode_set(struct drm_bridge *bridge, mutex_lock(&ctx->lock); - hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe); - ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector, adjusted_mode); if (ret) { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 73983f9b50cb..1b31fdebe164 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -23,6 +23,8 @@ #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> #define EDID_SEG_SIZE 256 #define EDID_LEN 32 @@ -333,49 +335,6 @@ end: return temp; } -static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611, - struct drm_connector *connector, - struct drm_display_mode *mode) -{ - union hdmi_infoframe infoframe; - ssize_t len; - u8 iframes = 0x0a; /* UD1 infoframe */ - u8 buf[32]; - int ret; - int i; - - ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8440 + i, buf[i]); - - ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, - connector, - mode); - if (ret < 0) - goto out; - - len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); - if (len < 0) - goto out; - - for (i = 0; i < len; i++) - regmap_write(lt9611->regmap, 0x8474 + i, buf[i]); - - iframes |= 0x20; - -out: - regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */ -} - static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi) { if (is_hdmi) @@ -719,7 +678,7 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge, } lt9611_mipi_input_analog(lt9611); - lt9611_hdmi_set_infoframes(lt9611, connector, mode); + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi); lt9611_hdmi_tx_phy(lt9611); @@ -798,22 +757,25 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned long long rate; if (mode->hdisplay > 3840) return MODE_BAD_HVALUE; - if (mode->vdisplay > 2160) - return MODE_BAD_VVALUE; - - if (mode->hdisplay == 3840 && - mode->vdisplay == 2160 && - drm_mode_vrefresh(mode) > 30) - return MODE_CLOCK_HIGH; - if (mode->hdisplay > 2000 && !lt9611->dsi1_node) return MODE_PANEL; - else - return MODE_OK; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate); +} + +static int lt9611_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + return drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); } static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge, @@ -887,6 +849,99 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge, return input_fmts; } +/* + * Other working frames: + * - 0x01, 0x84df + * - 0x04, 0x84c0 + */ +#define LT9611_INFOFRAME_AUDIO 0x02 +#define LT9611_INFOFRAME_AVI 0x08 +#define LT9611_INFOFRAME_SPD 0x10 +#define LT9611_INFOFRAME_VENDOR 0x20 + +static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum 
hdmi_infoframe_type type) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); + mask = 0; + break; + } + + if (mask) + regmap_update_bits(lt9611->regmap, 0x843d, mask, 0); + + return 0; +} + +static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + unsigned int mask, addr; + int i; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + mask = LT9611_INFOFRAME_AVI; + addr = 0x8440; + break; + + case HDMI_INFOFRAME_TYPE_SPD: + mask = LT9611_INFOFRAME_SPD; + addr = 0x8493; + break; + + case HDMI_INFOFRAME_TYPE_VENDOR: + mask = LT9611_INFOFRAME_VENDOR; + addr = 0x8474; + break; + + default: + drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); + mask = 0; + break; + } + + if (mask) { + for (i = 0; i < len; i++) + regmap_write(lt9611->regmap, addr + i, buffer[i]); + + regmap_update_bits(lt9611->regmap, 0x843d, mask, mask); + } + + return 0; +} + +static enum drm_mode_status +lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + /* 297 MHz for 4k@30 mode */ + if (tmds_rate > 297000000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, .mode_valid = lt9611_bridge_mode_valid, @@ -894,6 +949,7 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .edid_read = lt9611_bridge_edid_read, .hpd_enable = lt9611_bridge_hpd_enable, + .atomic_check = lt9611_bridge_atomic_check, .atomic_pre_enable = lt9611_bridge_atomic_pre_enable, .atomic_enable = lt9611_bridge_atomic_enable, .atomic_disable = lt9611_bridge_atomic_disable, @@ -902,6 +958,10 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = { .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts, + + .hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid, + .hdmi_write_infoframe = lt9611_hdmi_write_infoframe, + .hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe, }; static int lt9611_parse_dt(struct device *dev, @@ -1116,8 +1176,11 @@ static int lt9611_probe(struct i2c_client *client) lt9611->bridge.funcs = <9611_bridge_funcs; lt9611->bridge.of_node = client->dev.of_node; lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | - DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES; + DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES | + DRM_BRIDGE_OP_HDMI; lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + lt9611->bridge.vendor = "Lontium"; + lt9611->bridge.product = "LT9611"; drm_bridge_add(<9611->bridge); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 430f8adebf9c..4416d0be7272 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -2043,7 +2043,7 @@ void samsung_dsim_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(samsung_dsim_remove); -static int __maybe_unused samsung_dsim_suspend(struct device *dev) +static int 
samsung_dsim_suspend(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; @@ -2073,7 +2073,7 @@ static int __maybe_unused samsung_dsim_suspend(struct device *dev) return 0; } -static int __maybe_unused samsung_dsim_resume(struct device *dev) +static int samsung_dsim_resume(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; @@ -2108,7 +2108,7 @@ err_clk: } const struct dev_pm_ops samsung_dsim_pm_ops = { - SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) + RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; @@ -2142,7 +2142,7 @@ static struct platform_driver samsung_dsim_driver = { .remove_new = samsung_dsim_remove, .driver = { .name = "samsung-dsim", - .pm = &samsung_dsim_pm_ops, + .pm = pm_ptr(&samsung_dsim_pm_ops), .of_match_table = samsung_dsim_of_match, }, }; diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 7f91b0db161e..9be9cc5b9025 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -180,6 +180,8 @@ struct sii902x { struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; bool sink_is_hdmi; + u32 bus_width; + /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. @@ -477,6 +479,8 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, u32 output_fmt, unsigned int *num_input_fmts) { + + struct sii902x *sii902x = bridge_to_sii902x(bridge); u32 *input_fmts; *num_input_fmts = 0; @@ -485,7 +489,20 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, if (!input_fmts) return NULL; - input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + switch (sii902x->bus_width) { + case 16: + input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16; + break; + case 18: + input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18; + break; + case 24: + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + break; + default: + return NULL; + } + *num_input_fmts = 1; return input_fmts; @@ -1167,6 +1184,11 @@ static int sii902x_probe(struct i2c_client *client) return PTR_ERR(sii902x->reset_gpio); } + sii902x->bus_width = 24; + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + if (endpoint) + of_property_read_u32(endpoint, "bus-width", &sii902x->bus_width); + endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); if (endpoint) { struct device_node *remote = of_graph_get_remote_port_parent(endpoint); diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 15fc182d05ef..ca416dab156d 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -46,6 +46,14 @@ config DRM_DW_HDMI_CEC Support the CE interface which is part of the Synopsys Designware HDMI block. 
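The __maybe_unused removals and pm_ptr() conversions repeated above (imx8mp-hdmi-tx, imx8qm-ldb, imx8qxp-ldb, imx8qxp-pixel-combiner, samsung-dsim, dw-hdmi-cec) all follow one pattern: RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS() reference their callbacks unconditionally, so the annotations become redundant, while pm_ptr() resolves to NULL when CONFIG_PM is disabled so the ops structure and callbacks can be discarded. A condensed sketch of the pattern, with "foo" as a placeholder driver name:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	/* Always references the callbacks, so no __maybe_unused needed. */
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	/* probe/remove omitted for brevity */
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM=n, letting the ops be dropped. */
		.pm = pm_ptr(&foo_pm_ops),
	},
};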
+config DRM_DW_HDMI_QP + tristate + select DRM_DISPLAY_HDMI_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_KMS_HELPER + select REGMAP_MMIO + config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile index ce715562e9e5..9869d9651ed1 100644 --- a/drivers/gpu/drm/bridge/synopsys/Makefile +++ b/drivers/gpu/drm/bridge/synopsys/Makefile @@ -5,4 +5,6 @@ obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o +obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o + obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index 673661160e54..d4614de1ae1e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -312,7 +312,7 @@ static void dw_hdmi_cec_remove(struct platform_device *pdev) cec_unregister_adapter(cec->adap); } -static int __maybe_unused dw_hdmi_cec_resume(struct device *dev) +static int dw_hdmi_cec_resume(struct device *dev) { struct dw_hdmi_cec *cec = dev_get_drvdata(dev); @@ -328,7 +328,7 @@ static int __maybe_unused dw_hdmi_cec_resume(struct device *dev) return 0; } -static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev) +static int dw_hdmi_cec_suspend(struct device *dev) { struct dw_hdmi_cec *cec = dev_get_drvdata(dev); @@ -341,7 +341,7 @@ static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev) } static const struct dev_pm_ops dw_hdmi_cec_pm = { - SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume) + SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume) }; static struct platform_driver dw_hdmi_cec_driver = { @@ -349,7 +349,7 @@ static struct platform_driver dw_hdmi_cec_driver = { .remove_new = dw_hdmi_cec_remove, .driver = { .name = "dw-hdmi-cec", - .pm = &dw_hdmi_cec_pm, + .pm = pm_ptr(&dw_hdmi_cec_pm), }, }; module_platform_driver(dw_hdmi_cec_driver); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c new file mode 100644 index 000000000000..181c5164b231 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ +#include <linux/completion.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> +#include <drm/drm_modes.h> + +#include <sound/hdmi-codec.h> + +#include "dw-hdmi-qp.h" + +#define DDC_CI_ADDR 0x37 +#define DDC_SEGMENT_ADDR 0x30 + +#define HDMI14_MAX_TMDSCLK 340000000 + +#define SCRAMB_POLL_DELAY_MS 3000 + +struct dw_hdmi_qp_i2c { + struct i2c_adapter adap; + + struct mutex lock; /* used to serialize data transfers */ + struct completion cmp; + u8 stat; + + u8 slave_reg; + bool is_regaddr; + bool is_segment; +}; + +struct dw_hdmi_qp { + struct drm_bridge bridge; + + struct device *dev; + struct dw_hdmi_qp_i2c *i2c; + + struct { + const struct dw_hdmi_qp_phy_ops *ops; + void *data; + } phy; + + struct regmap *regm; +}; + +static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val, + int offset) +{ + regmap_write(hdmi->regm, offset, val); +} + +static unsigned int dw_hdmi_qp_read(struct dw_hdmi_qp *hdmi, int offset) +{ + unsigned int val = 0; + + regmap_read(hdmi->regm, offset, &val); + + return val; +} + +static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data, + unsigned int mask, unsigned int reg) +{ + regmap_update_bits(hdmi->regm, reg, mask, data); +} + +static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + dev_dbg(hdmi->dev, "set read register address to 0\n"); + i2c->slave_reg = 0x00; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + + if (i2c->is_segment) + dw_hdmi_qp_mod(hdmi, I2CM_EXT_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + else + dw_hdmi_qp_mod(hdmi, I2CM_FM_READ, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c read timed out\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c read error\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + *buf++ = dw_hdmi_qp_read(hdmi, I2CM_INTERFACE_RDDATA_0_3) & 0xff; + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + i2c->is_segment = false; + + return 0; +} + +static int dw_hdmi_qp_i2c_write(struct dw_hdmi_qp *hdmi, + unsigned char *buf, unsigned int length) +{ + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + int stat; + + if (!i2c->is_regaddr) { + /* Use the first write byte as register address */ + i2c->slave_reg = buf[0]; + length--; + buf++; + i2c->is_regaddr = true; + } + + while (length--) { + reinit_completion(&i2c->cmp); + + dw_hdmi_qp_write(hdmi, *buf++, I2CM_INTERFACE_WRDATA_0_3); + dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR, + I2CM_INTERFACE_CONTROL0); + dw_hdmi_qp_mod(hdmi, I2CM_FM_WRITE, I2CM_WR_MASK, + I2CM_INTERFACE_CONTROL0); + + stat = 
wait_for_completion_timeout(&i2c->cmp, HZ / 10); + if (!stat) { + dev_err(hdmi->dev, "i2c write timed out\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EAGAIN; + } + + /* Check for error condition on the bus */ + if (i2c->stat & I2CM_NACK_RCVD_IRQ) { + dev_err(hdmi->dev, "i2c write error\n"); + dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + return -EIO; + } + + dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0); + } + + return 0; +} + +static int dw_hdmi_qp_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct dw_hdmi_qp *hdmi = i2c_get_adapdata(adap); + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u8 addr = msgs[0].addr; + int i, ret = 0; + + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * FIXME: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. + */ + return -EOPNOTSUPP; + + for (i = 0; i < num; i++) { + if (msgs[i].len == 0) { + dev_err(hdmi->dev, + "unsupported transfer %d/%d, no data\n", + i + 1, num); + return -EOPNOTSUPP; + } + } + + guard(mutex)(&i2c->lock); + + /* Unmute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N, + MAINUNIT_1_INT_MASK_N); + + /* Set slave device address taken from the first I2C message */ + if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1) + addr = DDC_ADDR; + + dw_hdmi_qp_mod(hdmi, addr << 5, I2CM_SLVADDR, I2CM_INTERFACE_CONTROL0); + + /* Set slave device register address on transfer */ + i2c->is_regaddr = false; + + /* Set segment pointer for I2C extended read mode operation */ + i2c->is_segment = false; + + for (i = 0; i < num; i++) { + if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) { + i2c->is_segment = true; + dw_hdmi_qp_mod(hdmi, DDC_SEGMENT_ADDR, I2CM_SEG_ADDR, + I2CM_INTERFACE_CONTROL1); + dw_hdmi_qp_mod(hdmi, *msgs[i].buf << 7, I2CM_SEG_PTR, + I2CM_INTERFACE_CONTROL1); + } else { + if (msgs[i].flags & I2C_M_RD) + ret = dw_hdmi_qp_i2c_read(hdmi, msgs[i].buf, + msgs[i].len); + else + ret = dw_hdmi_qp_i2c_write(hdmi, msgs[i].buf, + msgs[i].len); + } + if (ret < 0) + break; + } + + if (!ret) + ret = num; + + /* Mute DONE and ERROR interrupts */ + dw_hdmi_qp_mod(hdmi, 0, I2CM_OP_DONE_MASK_N | I2CM_NACK_RCVD_MASK_N, + MAINUNIT_1_INT_MASK_N); + + return ret; +} + +static u32 dw_hdmi_qp_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm dw_hdmi_qp_algorithm = { + .master_xfer = dw_hdmi_qp_i2c_xfer, + .functionality = dw_hdmi_qp_i2c_func, +}; + +static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi) +{ + struct dw_hdmi_qp_i2c *i2c; + struct i2c_adapter *adap; + int ret; + + i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return ERR_PTR(-ENOMEM); + + mutex_init(&i2c->lock); + init_completion(&i2c->cmp); + + adap = &i2c->adap; + adap->owner = THIS_MODULE; + adap->dev.parent = hdmi->dev; + adap->algo = &dw_hdmi_qp_algorithm; + strscpy(adap->name, "DesignWare HDMI QP", sizeof(adap->name)); + + i2c_set_adapdata(adap, hdmi); + + ret = devm_i2c_add_adapter(hdmi->dev, adap); + if (ret) { + dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); + devm_kfree(hdmi->dev, i2c); + return ERR_PTR(ret); + } + + hdmi->i2c = i2c; + dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name); + + return adap; +} + +static int dw_hdmi_qp_config_avi_infoframe(struct 
dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i, j; + + if (len != HDMI_INFOFRAME_SIZE(AVI)) { + dev_err(hdmi->dev, "failed to configure avi infoframe\n"); + return -EINVAL; + } + + /* + * DW HDMI QP IP uses a different byte format from standard AVI info + * frames, though generally the bits are in the correct bytes. + */ + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + if (i * 4 + j >= 14) + break; + if (!j) + val = buffer[i * 4 + j + 3]; + val |= buffer[i * 4 + j + 3] << (8 * j); + } + + dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + + dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi, + const u8 *buffer, size_t len) +{ + u32 val, i; + + if (len != HDMI_INFOFRAME_SIZE(DRM)) { + dev_err(hdmi->dev, "failed to configure drm infoframe\n"); + return -EINVAL; + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + + val = buffer[1] << 8 | buffer[2] << 16; + dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0); + + for (i = 0; i <= buffer[2]; i++) { + if (i % 4 == 0) + val = buffer[3 + i]; + val |= buffer[3 + i] << ((i % 4) * 8); + + if ((i % 4 == 3) || i == buffer[2]) + dw_hdmi_qp_write(hdmi, val, + PKT_DRMI_CONTENTS1 + ((i / 4) * 4)); + } + + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1); + dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN, + PKTSCHED_PKT_EN); + + return 0; +} + +static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + int ret; + + ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector, + conn_state->state); + if (ret) + dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret); + + return ret; +} + +static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + struct drm_atomic_state *state = old_state->base.state; + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int op_mode; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + if (connector->display_info.is_hdmi) { + dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", + __func__, conn_state->hdmi.tmds_char_rate); + op_mode = 0; + } else { + dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__); + op_mode = OPMODE_DVI; + } + + hdmi->phy.ops->init(hdmi, hdmi->phy.data); + + dw_hdmi_qp_mod(hdmi, HDCP2_BYPASS, HDCP2_BYPASS, HDCP2LOGIC_CONFIG0); + dw_hdmi_qp_mod(hdmi, op_mode, OPMODE_DVI, LINK_CONFIG0); + + drm_atomic_helper_connector_hdmi_update_infoframes(connector, state); +} + +static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + hdmi->phy.ops->disable(hdmi, hdmi->phy.data); +} + +static enum drm_connector_status +dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge) +{ + struct dw_hdmi_qp *hdmi = 
bridge->driver_private; + + return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); +} + +static const struct drm_edid * +dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + const struct drm_edid *drm_edid; + + drm_edid = drm_edid_read_ddc(connector, bridge->ddc); + if (!drm_edid) + dev_dbg(hdmi->dev, "failed to get edid\n"); + + return drm_edid; +} + +static enum drm_mode_status +dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + unsigned long long rate; + + rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB); + if (rate > HDMI14_MAX_TMDSCLK) { + dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock); + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, + PKTSCHED_PKT_EN); + break; + + case HDMI_INFOFRAME_TYPE_DRM: + dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN); + break; + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + } + + return 0; +} + +static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct dw_hdmi_qp *hdmi = bridge->driver_private; + + dw_hdmi_qp_bridge_clear_infoframe(bridge, type); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len); + + case HDMI_INFOFRAME_TYPE_DRM: + return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len); + + default: + dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type); + return 0; + } +} + +static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_check = dw_hdmi_qp_bridge_atomic_check, + .atomic_enable = dw_hdmi_qp_bridge_atomic_enable, + .atomic_disable = dw_hdmi_qp_bridge_atomic_disable, + .detect = dw_hdmi_qp_bridge_detect, + .edid_read = dw_hdmi_qp_bridge_edid_read, + .mode_valid = dw_hdmi_qp_bridge_mode_valid, + .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe, + .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe, +}; + +static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_i2c *i2c = hdmi->i2c; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, MAINUNIT_1_INT_STATUS); + + i2c->stat = stat & (I2CM_OP_DONE_IRQ | I2CM_READ_REQUEST_IRQ | + I2CM_NACK_RCVD_IRQ); + + if (i2c->stat) { + dw_hdmi_qp_write(hdmi, i2c->stat, MAINUNIT_1_INT_CLEAR); + complete(&i2c->cmp); + } + + if (stat) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct regmap_config dw_hdmi_qp_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = EARCRX_1_INT_FORCE, +}; + +static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); + dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + + /* Software reset */ + 
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); + + dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); + + dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); + + /* Clear DONE and ERROR interrupts */ + dw_hdmi_qp_write(hdmi, I2CM_OP_DONE_CLEAR | I2CM_NACK_RCVD_CLEAR, + MAINUNIT_1_INT_CLEAR); + + if (hdmi->phy.ops->setup_hpd) + hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); +} + +struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_qp_plat_data *plat_data) +{ + struct device *dev = &pdev->dev; + struct dw_hdmi_qp *hdmi; + void __iomem *regs; + int ret; + + if (!plat_data->phy_ops || !plat_data->phy_ops->init || + !plat_data->phy_ops->disable || !plat_data->phy_ops->read_hpd) { + dev_err(dev, "Missing platform PHY ops\n"); + return ERR_PTR(-ENODEV); + } + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return ERR_PTR(-ENOMEM); + + hdmi->dev = dev; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + hdmi->regm = devm_regmap_init_mmio(dev, regs, &dw_hdmi_qp_regmap_config); + if (IS_ERR(hdmi->regm)) { + dev_err(dev, "Failed to configure regmap\n"); + return ERR_CAST(hdmi->regm); + } + + hdmi->phy.ops = plat_data->phy_ops; + hdmi->phy.data = plat_data->phy_data; + + dw_hdmi_qp_init_hw(hdmi); + + ret = devm_request_threaded_irq(dev, plat_data->main_irq, + dw_hdmi_qp_main_hardirq, NULL, + IRQF_SHARED, dev_name(dev), hdmi); + if (ret) + return ERR_PTR(ret); + + hdmi->bridge.driver_private = hdmi; + hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs; + hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HPD; + hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + hdmi->bridge.vendor = "Synopsys"; + hdmi->bridge.product = "DW HDMI QP TX"; + + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); + if (IS_ERR(hdmi->bridge.ddc)) + return ERR_CAST(hdmi->bridge.ddc); + + ret = devm_drm_bridge_add(dev, &hdmi->bridge); + if (ret) + return ERR_PTR(ret); + + ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ERR_PTR(ret); + + return hdmi; +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind); + +void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi) +{ + dw_hdmi_qp_init_hw(hdmi); +} +EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume); + +MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>"); +MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>"); +MODULE_DESCRIPTION("DW HDMI QP transmitter library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h new file mode 100644 index 000000000000..2115b8ef0bd6 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Rockchip Electronics Co.Ltd + * Author: + * Algea Cao <algea.cao@rock-chips.com> + */ +#ifndef __DW_HDMI_QP_H__ +#define __DW_HDMI_QP_H__ + +#include <linux/bits.h> + +/* Main Unit Registers */ +#define CORE_ID 0x0 +#define VER_NUMBER 0x4 +#define VER_TYPE 0x8 +#define CONFIG_REG 0xc +#define CONFIG_CEC BIT(28) +#define CONFIG_AUD_UD BIT(23) +#define CORE_TIMESTAMP_HHMM 0x14 +#define CORE_TIMESTAMP_MMDD 0x18 +#define CORE_TIMESTAMP_YYYY 0x1c +/* Reset Manager Registers */ +#define GLOBAL_SWRESET_REQUEST 0x40 +#define EARCRX_CMDC_SWINIT_P BIT(27) +#define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10) +#define 
GLOBAL_SWDISABLE 0x44 +#define CEC_SWDISABLE BIT(17) +#define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10) +#define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6) +#define RESET_MANAGER_CONFIG0 0x48 +#define RESET_MANAGER_STATUS0 0x50 +#define RESET_MANAGER_STATUS1 0x54 +#define RESET_MANAGER_STATUS2 0x58 +/* Timer Base Registers */ +#define TIMER_BASE_CONFIG0 0x80 +#define TIMER_BASE_STATUS0 0x84 +/* CMU Registers */ +#define CMU_CONFIG0 0xa0 +#define CMU_CONFIG1 0xa4 +#define CMU_CONFIG2 0xa8 +#define CMU_CONFIG3 0xac +#define CMU_STATUS 0xb0 +#define DISPLAY_CLK_MONITOR 0x3f +#define DISPLAY_CLK_LOCKED 0X15 +#define EARC_BPCLK_OFF BIT(9) +#define AUDCLK_OFF BIT(7) +#define LINKQPCLK_OFF BIT(5) +#define VIDQPCLK_OFF BIT(3) +#define IPI_CLK_OFF BIT(1) +#define CMU_IPI_CLK_FREQ 0xb4 +#define CMU_VIDQPCLK_FREQ 0xb8 +#define CMU_LINKQPCLK_FREQ 0xbc +#define CMU_AUDQPCLK_FREQ 0xc0 +#define CMU_EARC_BPCLK_FREQ 0xc4 +/* I2CM Registers */ +#define I2CM_SM_SCL_CONFIG0 0xe0 +#define I2CM_FM_SCL_CONFIG0 0xe4 +#define I2CM_CONFIG0 0xe8 +#define I2CM_CONTROL0 0xec +#define I2CM_STATUS0 0xf0 +#define I2CM_INTERFACE_CONTROL0 0xf4 +#define I2CM_ADDR 0xff000 +#define I2CM_SLVADDR 0xfe0 +#define I2CM_WR_MASK 0x1e +#define I2CM_EXT_READ BIT(4) +#define I2CM_SHORT_READ BIT(3) +#define I2CM_FM_READ BIT(2) +#define I2CM_FM_WRITE BIT(1) +#define I2CM_FM_EN BIT(0) +#define I2CM_INTERFACE_CONTROL1 0xf8 +#define I2CM_SEG_PTR 0x7f80 +#define I2CM_SEG_ADDR 0x7f +#define I2CM_INTERFACE_WRDATA_0_3 0xfc +#define I2CM_INTERFACE_WRDATA_4_7 0x100 +#define I2CM_INTERFACE_WRDATA_8_11 0x104 +#define I2CM_INTERFACE_WRDATA_12_15 0x108 +#define I2CM_INTERFACE_RDDATA_0_3 0x10c +#define I2CM_INTERFACE_RDDATA_4_7 0x110 +#define I2CM_INTERFACE_RDDATA_8_11 0x114 +#define I2CM_INTERFACE_RDDATA_12_15 0x118 +/* SCDC Registers */ +#define SCDC_CONFIG0 0x140 +#define SCDC_I2C_FM_EN BIT(12) +#define SCDC_UPD_FLAGS_AUTO_CLR BIT(6) +#define SCDC_UPD_FLAGS_POLL_EN BIT(4) +#define SCDC_CONTROL0 0x148 +#define SCDC_STATUS0 0x150 +#define STATUS_UPDATE BIT(0) +#define FRL_START BIT(4) +#define FLT_UPDATE BIT(5) +/* FLT Registers */ +#define FLT_CONFIG0 0x160 +#define FLT_CONFIG1 0x164 +#define FLT_CONFIG2 0x168 +#define FLT_CONTROL0 0x170 +/* Main Unit 2 Registers */ +#define MAINUNIT_STATUS0 0x180 +/* Video Interface Registers */ +#define VIDEO_INTERFACE_CONFIG0 0x800 +#define VIDEO_INTERFACE_CONFIG1 0x804 +#define VIDEO_INTERFACE_CONFIG2 0x808 +#define VIDEO_INTERFACE_CONTROL0 0x80c +#define VIDEO_INTERFACE_STATUS0 0x814 +/* Video Packing Registers */ +#define VIDEO_PACKING_CONFIG0 0x81c +/* Audio Interface Registers */ +#define AUDIO_INTERFACE_CONFIG0 0x820 +#define AUD_IF_SEL_MSK 0x3 +#define AUD_IF_SPDIF 0x2 +#define AUD_IF_I2S 0x1 +#define AUD_IF_PAI 0x0 +#define AUD_FIFO_INIT_ON_OVF_MSK BIT(2) +#define AUD_FIFO_INIT_ON_OVF_EN BIT(2) +#define I2S_LINES_EN_MSK GENMASK(7, 4) +#define I2S_LINES_EN(x) BIT((x) + 4) +#define I2S_BPCUV_RCV_MSK BIT(12) +#define I2S_BPCUV_RCV_EN BIT(12) +#define I2S_BPCUV_RCV_DIS 0 +#define SPDIF_LINES_EN GENMASK(19, 16) +#define AUD_FORMAT_MSK GENMASK(26, 24) +#define AUD_3DOBA (0x7 << 24) +#define AUD_3DASP (0x6 << 24) +#define AUD_MSOBA (0x5 << 24) +#define AUD_MSASP (0x4 << 24) +#define AUD_HBR (0x3 << 24) +#define AUD_DST (0x2 << 24) +#define AUD_OBA (0x1 << 24) +#define AUD_ASP (0x0 << 24) +#define AUDIO_INTERFACE_CONFIG1 0x824 +#define AUDIO_INTERFACE_CONTROL0 0x82c +#define AUDIO_FIFO_CLR_P BIT(0) +#define AUDIO_INTERFACE_STATUS0 0x834 +/* Frame Composer Registers */ +#define FRAME_COMPOSER_CONFIG0 0x840 +#define 
FRAME_COMPOSER_CONFIG1 0x844 +#define FRAME_COMPOSER_CONFIG2 0x848 +#define FRAME_COMPOSER_CONFIG3 0x84c +#define FRAME_COMPOSER_CONFIG4 0x850 +#define FRAME_COMPOSER_CONFIG5 0x854 +#define FRAME_COMPOSER_CONFIG6 0x858 +#define FRAME_COMPOSER_CONFIG7 0x85c +#define FRAME_COMPOSER_CONFIG8 0x860 +#define FRAME_COMPOSER_CONFIG9 0x864 +#define FRAME_COMPOSER_CONTROL0 0x86c +/* Video Monitor Registers */ +#define VIDEO_MONITOR_CONFIG0 0x880 +#define VIDEO_MONITOR_STATUS0 0x884 +#define VIDEO_MONITOR_STATUS1 0x888 +#define VIDEO_MONITOR_STATUS2 0x88c +#define VIDEO_MONITOR_STATUS3 0x890 +#define VIDEO_MONITOR_STATUS4 0x894 +#define VIDEO_MONITOR_STATUS5 0x898 +#define VIDEO_MONITOR_STATUS6 0x89c +/* HDCP2 Logic Registers */ +#define HDCP2LOGIC_CONFIG0 0x8e0 +#define HDCP2_BYPASS BIT(0) +#define HDCP2LOGIC_ESM_GPIO_IN 0x8e4 +#define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8 +/* HDCP14 Registers */ +#define HDCP14_CONFIG0 0x900 +#define HDCP14_CONFIG1 0x904 +#define HDCP14_CONFIG2 0x908 +#define HDCP14_CONFIG3 0x90c +#define HDCP14_KEY_SEED 0x914 +#define HDCP14_KEY_H 0x918 +#define HDCP14_KEY_L 0x91c +#define HDCP14_KEY_STATUS 0x920 +#define HDCP14_AKSV_H 0x924 +#define HDCP14_AKSV_L 0x928 +#define HDCP14_AN_H 0x92c +#define HDCP14_AN_L 0x930 +#define HDCP14_STATUS0 0x934 +#define HDCP14_STATUS1 0x938 +/* Scrambler Registers */ +#define SCRAMB_CONFIG0 0x960 +/* Video Configuration Registers */ +#define LINK_CONFIG0 0x968 +#define OPMODE_FRL_4LANES BIT(8) +#define OPMODE_DVI BIT(4) +#define OPMODE_FRL BIT(0) +/* TMDS FIFO Registers */ +#define TMDS_FIFO_CONFIG0 0x970 +#define TMDS_FIFO_CONTROL0 0x974 +/* FRL RSFEC Registers */ +#define FRL_RSFEC_CONFIG0 0xa20 +#define FRL_RSFEC_STATUS0 0xa30 +/* FRL Packetizer Registers */ +#define FRL_PKTZ_CONFIG0 0xa40 +#define FRL_PKTZ_CONTROL0 0xa44 +#define FRL_PKTZ_CONTROL1 0xa50 +#define FRL_PKTZ_STATUS1 0xa54 +/* Packet Scheduler Registers */ +#define PKTSCHED_CONFIG0 0xa80 +#define PKTSCHED_PRQUEUE0_CONFIG0 0xa84 +#define PKTSCHED_PRQUEUE1_CONFIG0 0xa88 +#define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c +#define PKTSCHED_PRQUEUE2_CONFIG1 0xa90 +#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94 +#define PKTSCHED_PKT_CONFIG0 0xa98 +#define PKTSCHED_PKT_CONFIG1 0xa9c +#define PKTSCHED_DRMI_FIELDRATE BIT(13) +#define PKTSCHED_AVI_FIELDRATE BIT(12) +#define PKTSCHED_PKT_CONFIG2 0xaa0 +#define PKTSCHED_PKT_CONFIG3 0xaa4 +#define PKTSCHED_PKT_EN 0xaa8 +#define PKTSCHED_DRMI_TX_EN BIT(17) +#define PKTSCHED_AUDI_TX_EN BIT(15) +#define PKTSCHED_AVI_TX_EN BIT(13) +#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) +#define PKTSCHED_AMD_TX_EN BIT(8) +#define PKTSCHED_GCP_TX_EN BIT(3) +#define PKTSCHED_AUDS_TX_EN BIT(2) +#define PKTSCHED_ACR_TX_EN BIT(1) +#define PKTSCHED_NULL_TX_EN BIT(0) +#define PKTSCHED_PKT_CONTROL0 0xaac +#define PKTSCHED_PKT_SEND 0xab0 +#define PKTSCHED_PKT_STATUS0 0xab4 +#define PKTSCHED_PKT_STATUS1 0xab8 +#define PKT_NULL_CONTENTS0 0xb00 +#define PKT_NULL_CONTENTS1 0xb04 +#define PKT_NULL_CONTENTS2 0xb08 +#define PKT_NULL_CONTENTS3 0xb0c +#define PKT_NULL_CONTENTS4 0xb10 +#define PKT_NULL_CONTENTS5 0xb14 +#define PKT_NULL_CONTENTS6 0xb18 +#define PKT_NULL_CONTENTS7 0xb1c +#define PKT_ACP_CONTENTS0 0xb20 +#define PKT_ACP_CONTENTS1 0xb24 +#define PKT_ACP_CONTENTS2 0xb28 +#define PKT_ACP_CONTENTS3 0xb2c +#define PKT_ACP_CONTENTS4 0xb30 +#define PKT_ACP_CONTENTS5 0xb34 +#define PKT_ACP_CONTENTS6 0xb38 +#define PKT_ACP_CONTENTS7 0xb3c +#define PKT_ISRC1_CONTENTS0 0xb40 +#define PKT_ISRC1_CONTENTS1 0xb44 +#define PKT_ISRC1_CONTENTS2 0xb48 +#define PKT_ISRC1_CONTENTS3 0xb4c +#define 
PKT_ISRC1_CONTENTS4 0xb50 +#define PKT_ISRC1_CONTENTS5 0xb54 +#define PKT_ISRC1_CONTENTS6 0xb58 +#define PKT_ISRC1_CONTENTS7 0xb5c +#define PKT_ISRC2_CONTENTS0 0xb60 +#define PKT_ISRC2_CONTENTS1 0xb64 +#define PKT_ISRC2_CONTENTS2 0xb68 +#define PKT_ISRC2_CONTENTS3 0xb6c +#define PKT_ISRC2_CONTENTS4 0xb70 +#define PKT_ISRC2_CONTENTS5 0xb74 +#define PKT_ISRC2_CONTENTS6 0xb78 +#define PKT_ISRC2_CONTENTS7 0xb7c +#define PKT_GMD_CONTENTS0 0xb80 +#define PKT_GMD_CONTENTS1 0xb84 +#define PKT_GMD_CONTENTS2 0xb88 +#define PKT_GMD_CONTENTS3 0xb8c +#define PKT_GMD_CONTENTS4 0xb90 +#define PKT_GMD_CONTENTS5 0xb94 +#define PKT_GMD_CONTENTS6 0xb98 +#define PKT_GMD_CONTENTS7 0xb9c +#define PKT_AMD_CONTENTS0 0xba0 +#define PKT_AMD_CONTENTS1 0xba4 +#define PKT_AMD_CONTENTS2 0xba8 +#define PKT_AMD_CONTENTS3 0xbac +#define PKT_AMD_CONTENTS4 0xbb0 +#define PKT_AMD_CONTENTS5 0xbb4 +#define PKT_AMD_CONTENTS6 0xbb8 +#define PKT_AMD_CONTENTS7 0xbbc +#define PKT_VSI_CONTENTS0 0xbc0 +#define PKT_VSI_CONTENTS1 0xbc4 +#define PKT_VSI_CONTENTS2 0xbc8 +#define PKT_VSI_CONTENTS3 0xbcc +#define PKT_VSI_CONTENTS4 0xbd0 +#define PKT_VSI_CONTENTS5 0xbd4 +#define PKT_VSI_CONTENTS6 0xbd8 +#define PKT_VSI_CONTENTS7 0xbdc +#define PKT_AVI_CONTENTS0 0xbe0 +#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4) +#define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04 +#define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08 +#define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80 +#define PKT_AVI_CONTENTS1 0xbe4 +#define PKT_AVI_CONTENTS2 0xbe8 +#define PKT_AVI_CONTENTS3 0xbec +#define PKT_AVI_CONTENTS4 0xbf0 +#define PKT_AVI_CONTENTS5 0xbf4 +#define PKT_AVI_CONTENTS6 0xbf8 +#define PKT_AVI_CONTENTS7 0xbfc +#define PKT_SPDI_CONTENTS0 0xc00 +#define PKT_SPDI_CONTENTS1 0xc04 +#define PKT_SPDI_CONTENTS2 0xc08 +#define PKT_SPDI_CONTENTS3 0xc0c +#define PKT_SPDI_CONTENTS4 0xc10 +#define PKT_SPDI_CONTENTS5 0xc14 +#define PKT_SPDI_CONTENTS6 0xc18 +#define PKT_SPDI_CONTENTS7 0xc1c +#define PKT_AUDI_CONTENTS0 0xc20 +#define PKT_AUDI_CONTENTS1 0xc24 +#define PKT_AUDI_CONTENTS2 0xc28 +#define PKT_AUDI_CONTENTS3 0xc2c +#define PKT_AUDI_CONTENTS4 0xc30 +#define PKT_AUDI_CONTENTS5 0xc34 +#define PKT_AUDI_CONTENTS6 0xc38 +#define PKT_AUDI_CONTENTS7 0xc3c +#define PKT_NVI_CONTENTS0 0xc40 +#define PKT_NVI_CONTENTS1 0xc44 +#define PKT_NVI_CONTENTS2 0xc48 +#define PKT_NVI_CONTENTS3 0xc4c +#define PKT_NVI_CONTENTS4 0xc50 +#define PKT_NVI_CONTENTS5 0xc54 +#define PKT_NVI_CONTENTS6 0xc58 +#define PKT_NVI_CONTENTS7 0xc5c +#define PKT_DRMI_CONTENTS0 0xc60 +#define PKT_DRMI_CONTENTS1 0xc64 +#define PKT_DRMI_CONTENTS2 0xc68 +#define PKT_DRMI_CONTENTS3 0xc6c +#define PKT_DRMI_CONTENTS4 0xc70 +#define PKT_DRMI_CONTENTS5 0xc74 +#define PKT_DRMI_CONTENTS6 0xc78 +#define PKT_DRMI_CONTENTS7 0xc7c +#define PKT_GHDMI1_CONTENTS0 0xc80 +#define PKT_GHDMI1_CONTENTS1 0xc84 +#define PKT_GHDMI1_CONTENTS2 0xc88 +#define PKT_GHDMI1_CONTENTS3 0xc8c +#define PKT_GHDMI1_CONTENTS4 0xc90 +#define PKT_GHDMI1_CONTENTS5 0xc94 +#define PKT_GHDMI1_CONTENTS6 0xc98 +#define PKT_GHDMI1_CONTENTS7 0xc9c +#define PKT_GHDMI2_CONTENTS0 0xca0 +#define PKT_GHDMI2_CONTENTS1 0xca4 +#define PKT_GHDMI2_CONTENTS2 0xca8 +#define PKT_GHDMI2_CONTENTS3 0xcac +#define PKT_GHDMI2_CONTENTS4 0xcb0 +#define PKT_GHDMI2_CONTENTS5 0xcb4 +#define PKT_GHDMI2_CONTENTS6 0xcb8 +#define PKT_GHDMI2_CONTENTS7 0xcbc +/* EMP Packetizer Registers */ +#define PKT_EMP_CONFIG0 0xce0 +#define PKT_EMP_CONTROL0 0xcec +#define PKT_EMP_CONTROL1 0xcf0 +#define PKT_EMP_CONTROL2 0xcf4 +#define PKT_EMP_VTEM_CONTENTS0 0xd00 +#define 
PKT_EMP_VTEM_CONTENTS1 0xd04 +#define PKT_EMP_VTEM_CONTENTS2 0xd08 +#define PKT_EMP_VTEM_CONTENTS3 0xd0c +#define PKT_EMP_VTEM_CONTENTS4 0xd10 +#define PKT_EMP_VTEM_CONTENTS5 0xd14 +#define PKT_EMP_VTEM_CONTENTS6 0xd18 +#define PKT_EMP_VTEM_CONTENTS7 0xd1c +#define PKT0_EMP_CVTEM_CONTENTS0 0xd20 +#define PKT0_EMP_CVTEM_CONTENTS1 0xd24 +#define PKT0_EMP_CVTEM_CONTENTS2 0xd28 +#define PKT0_EMP_CVTEM_CONTENTS3 0xd2c +#define PKT0_EMP_CVTEM_CONTENTS4 0xd30 +#define PKT0_EMP_CVTEM_CONTENTS5 0xd34 +#define PKT0_EMP_CVTEM_CONTENTS6 0xd38 +#define PKT0_EMP_CVTEM_CONTENTS7 0xd3c +#define PKT1_EMP_CVTEM_CONTENTS0 0xd40 +#define PKT1_EMP_CVTEM_CONTENTS1 0xd44 +#define PKT1_EMP_CVTEM_CONTENTS2 0xd48 +#define PKT1_EMP_CVTEM_CONTENTS3 0xd4c +#define PKT1_EMP_CVTEM_CONTENTS4 0xd50 +#define PKT1_EMP_CVTEM_CONTENTS5 0xd54 +#define PKT1_EMP_CVTEM_CONTENTS6 0xd58 +#define PKT1_EMP_CVTEM_CONTENTS7 0xd5c +#define PKT2_EMP_CVTEM_CONTENTS0 0xd60 +#define PKT2_EMP_CVTEM_CONTENTS1 0xd64 +#define PKT2_EMP_CVTEM_CONTENTS2 0xd68 +#define PKT2_EMP_CVTEM_CONTENTS3 0xd6c +#define PKT2_EMP_CVTEM_CONTENTS4 0xd70 +#define PKT2_EMP_CVTEM_CONTENTS5 0xd74 +#define PKT2_EMP_CVTEM_CONTENTS6 0xd78 +#define PKT2_EMP_CVTEM_CONTENTS7 0xd7c +#define PKT3_EMP_CVTEM_CONTENTS0 0xd80 +#define PKT3_EMP_CVTEM_CONTENTS1 0xd84 +#define PKT3_EMP_CVTEM_CONTENTS2 0xd88 +#define PKT3_EMP_CVTEM_CONTENTS3 0xd8c +#define PKT3_EMP_CVTEM_CONTENTS4 0xd90 +#define PKT3_EMP_CVTEM_CONTENTS5 0xd94 +#define PKT3_EMP_CVTEM_CONTENTS6 0xd98 +#define PKT3_EMP_CVTEM_CONTENTS7 0xd9c +#define PKT4_EMP_CVTEM_CONTENTS0 0xda0 +#define PKT4_EMP_CVTEM_CONTENTS1 0xda4 +#define PKT4_EMP_CVTEM_CONTENTS2 0xda8 +#define PKT4_EMP_CVTEM_CONTENTS3 0xdac +#define PKT4_EMP_CVTEM_CONTENTS4 0xdb0 +#define PKT4_EMP_CVTEM_CONTENTS5 0xdb4 +#define PKT4_EMP_CVTEM_CONTENTS6 0xdb8 +#define PKT4_EMP_CVTEM_CONTENTS7 0xdbc +#define PKT5_EMP_CVTEM_CONTENTS0 0xdc0 +#define PKT5_EMP_CVTEM_CONTENTS1 0xdc4 +#define PKT5_EMP_CVTEM_CONTENTS2 0xdc8 +#define PKT5_EMP_CVTEM_CONTENTS3 0xdcc +#define PKT5_EMP_CVTEM_CONTENTS4 0xdd0 +#define PKT5_EMP_CVTEM_CONTENTS5 0xdd4 +#define PKT5_EMP_CVTEM_CONTENTS6 0xdd8 +#define PKT5_EMP_CVTEM_CONTENTS7 0xddc +/* Audio Packetizer Registers */ +#define AUDPKT_CONTROL0 0xe20 +#define AUDPKT_PBIT_FORCE_EN_MASK BIT(12) +#define AUDPKT_PBIT_FORCE_EN BIT(12) +#define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0) +#define AUDPKT_CHSTATUS_OVR_EN BIT(0) +#define AUDPKT_CONTROL1 0xe24 +#define AUDPKT_ACR_CONTROL0 0xe40 +#define AUDPKT_ACR_N_VALUE 0xfffff +#define AUDPKT_ACR_CONTROL1 0xe44 +#define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4) +#define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4) +#define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1) +#define AUDPKT_ACR_CTS_OVR_EN BIT(1) +#define AUDPKT_ACR_STATUS0 0xe4c +#define AUDPKT_CHSTATUS_OVR0 0xe60 +#define AUDPKT_CHSTATUS_OVR1 0xe64 +/* IEC60958 Byte 3: Sampling frequency Bits 24 to 27 */ +#define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0) +#define AUDPKT_CHSTATUS_SR_22050 0x4 +#define AUDPKT_CHSTATUS_SR_24000 0x6 +#define AUDPKT_CHSTATUS_SR_32000 0x3 +#define AUDPKT_CHSTATUS_SR_44100 0x0 +#define AUDPKT_CHSTATUS_SR_48000 0x2 +#define AUDPKT_CHSTATUS_SR_88200 0x8 +#define AUDPKT_CHSTATUS_SR_96000 0xa +#define AUDPKT_CHSTATUS_SR_176400 0xc +#define AUDPKT_CHSTATUS_SR_192000 0xe +#define AUDPKT_CHSTATUS_SR_768000 0x9 +#define AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1 +/* IEC60958 Byte 4: Original sampling frequency Bits 36 to 39 */ +#define AUDPKT_CHSTATUS_OSR_MASK GENMASK(15, 12) +#define AUDPKT_CHSTATUS_OSR_8000 0x6 +#define AUDPKT_CHSTATUS_OSR_11025 0xa
+#define AUDPKT_CHSTATUS_OSR_12000 0x2 +#define AUDPKT_CHSTATUS_OSR_16000 0x8 +#define AUDPKT_CHSTATUS_OSR_22050 0xb +#define AUDPKT_CHSTATUS_OSR_24000 0x9 +#define AUDPKT_CHSTATUS_OSR_32000 0xc +#define AUDPKT_CHSTATUS_OSR_44100 0xf +#define AUDPKT_CHSTATUS_OSR_48000 0xd +#define AUDPKT_CHSTATUS_OSR_88200 0x7 +#define AUDPKT_CHSTATUS_OSR_96000 0x5 +#define AUDPKT_CHSTATUS_OSR_176400 0x3 +#define AUDPKT_CHSTATUS_OSR_192000 0x1 +#define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0 +#define AUDPKT_CHSTATUS_OVR2 0xe68 +#define AUDPKT_CHSTATUS_OVR3 0xe6c +#define AUDPKT_CHSTATUS_OVR4 0xe70 +#define AUDPKT_CHSTATUS_OVR5 0xe74 +#define AUDPKT_CHSTATUS_OVR6 0xe78 +#define AUDPKT_CHSTATUS_OVR7 0xe7c +#define AUDPKT_CHSTATUS_OVR8 0xe80 +#define AUDPKT_CHSTATUS_OVR9 0xe84 +#define AUDPKT_CHSTATUS_OVR10 0xe88 +#define AUDPKT_CHSTATUS_OVR11 0xe8c +#define AUDPKT_CHSTATUS_OVR12 0xe90 +#define AUDPKT_CHSTATUS_OVR13 0xe94 +#define AUDPKT_CHSTATUS_OVR14 0xe98 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac +#define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec +#define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc +#define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18 +#define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c +#define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20 +#define AUDPKT_VBIT_OVR0 0xf24 +/* CEC Registers */ +#define CEC_TX_CONTROL 0x1000 +#define CEC_STATUS 0x1004 +#define CEC_CONFIG 0x1008 +#define CEC_ADDR 0x100c +#define CEC_TX_COUNT 0x1020 +#define CEC_TX_DATA3_0 0x1024 +#define CEC_TX_DATA7_4 0x1028 +#define CEC_TX_DATA11_8 0x102c +#define CEC_TX_DATA15_12 0x1030 +#define CEC_RX_COUNT_STATUS 0x1040 +#define CEC_RX_DATA3_0 0x1044 +#define CEC_RX_DATA7_4 0x1048 +#define CEC_RX_DATA11_8 0x104c +#define CEC_RX_DATA15_12 0x1050 +#define CEC_LOCK_CONTROL 0x1054 +#define CEC_RXQUAL_BITTIME_CONFIG 0x1060 +#define CEC_RX_BITTIME_CONFIG 0x1064 +#define CEC_TX_BITTIME_CONFIG 0x1068 +/* eARC RX CMDC Registers */ +#define EARCRX_CMDC_CONFIG0 0x1800 +#define EARCRX_XACTREAD_STOP_CFG BIT(26) +#define EARCRX_XACTREAD_RETRY_CFG BIT(25) +#define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24) +#define EARCRX_CMDC_XACT_RESTART_EN BIT(18) +#define EARCRX_CMDC_CONFIG1 0x1804 +#define EARCRX_CMDC_CONTROL 0x1808 +#define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4) +#define 
EARCRX_CMDC_DISCOVERY_EN BIT(3) +#define EARCRX_CONNECTOR_HPD BIT(1) +#define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c +#define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810 +#define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814 +#define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818 +#define EARCRX_CMDC_STATUS 0x181c +#define EARCRX_CMDC_XACT_INFO 0x1820 +#define EARCRX_CMDC_XACT_ACTION 0x1824 +#define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828 +#define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c +#define EARCRX_CMDC_XACT_WR0 0x1840 +#define EARCRX_CMDC_XACT_WR1 0x1844 +#define EARCRX_CMDC_XACT_WR2 0x1848 +#define EARCRX_CMDC_XACT_WR3 0x184c +#define EARCRX_CMDC_XACT_WR4 0x1850 +#define EARCRX_CMDC_XACT_WR5 0x1854 +#define EARCRX_CMDC_XACT_WR6 0x1858 +#define EARCRX_CMDC_XACT_WR7 0x185c +#define EARCRX_CMDC_XACT_WR8 0x1860 +#define EARCRX_CMDC_XACT_WR9 0x1864 +#define EARCRX_CMDC_XACT_WR10 0x1868 +#define EARCRX_CMDC_XACT_WR11 0x186c +#define EARCRX_CMDC_XACT_WR12 0x1870 +#define EARCRX_CMDC_XACT_WR13 0x1874 +#define EARCRX_CMDC_XACT_WR14 0x1878 +#define EARCRX_CMDC_XACT_WR15 0x187c +#define EARCRX_CMDC_XACT_WR16 0x1880 +#define EARCRX_CMDC_XACT_WR17 0x1884 +#define EARCRX_CMDC_XACT_WR18 0x1888 +#define EARCRX_CMDC_XACT_WR19 0x188c +#define EARCRX_CMDC_XACT_WR20 0x1890 +#define EARCRX_CMDC_XACT_WR21 0x1894 +#define EARCRX_CMDC_XACT_WR22 0x1898 +#define EARCRX_CMDC_XACT_WR23 0x189c +#define EARCRX_CMDC_XACT_WR24 0x18a0 +#define EARCRX_CMDC_XACT_WR25 0x18a4 +#define EARCRX_CMDC_XACT_WR26 0x18a8 +#define EARCRX_CMDC_XACT_WR27 0x18ac +#define EARCRX_CMDC_XACT_WR28 0x18b0 +#define EARCRX_CMDC_XACT_WR29 0x18b4 +#define EARCRX_CMDC_XACT_WR30 0x18b8 +#define EARCRX_CMDC_XACT_WR31 0x18bc +#define EARCRX_CMDC_XACT_WR32 0x18c0 +#define EARCRX_CMDC_XACT_WR33 0x18c4 +#define EARCRX_CMDC_XACT_WR34 0x18c8 +#define EARCRX_CMDC_XACT_WR35 0x18cc +#define EARCRX_CMDC_XACT_WR36 0x18d0 +#define EARCRX_CMDC_XACT_WR37 0x18d4 +#define EARCRX_CMDC_XACT_WR38 0x18d8 +#define EARCRX_CMDC_XACT_WR39 0x18dc +#define EARCRX_CMDC_XACT_WR40 0x18e0 +#define EARCRX_CMDC_XACT_WR41 0x18e4 +#define EARCRX_CMDC_XACT_WR42 0x18e8 +#define EARCRX_CMDC_XACT_WR43 0x18ec +#define EARCRX_CMDC_XACT_WR44 0x18f0 +#define EARCRX_CMDC_XACT_WR45 0x18f4 +#define EARCRX_CMDC_XACT_WR46 0x18f8 +#define EARCRX_CMDC_XACT_WR47 0x18fc +#define EARCRX_CMDC_XACT_WR48 0x1900 +#define EARCRX_CMDC_XACT_WR49 0x1904 +#define EARCRX_CMDC_XACT_WR50 0x1908 +#define EARCRX_CMDC_XACT_WR51 0x190c +#define EARCRX_CMDC_XACT_WR52 0x1910 +#define EARCRX_CMDC_XACT_WR53 0x1914 +#define EARCRX_CMDC_XACT_WR54 0x1918 +#define EARCRX_CMDC_XACT_WR55 0x191c +#define EARCRX_CMDC_XACT_WR56 0x1920 +#define EARCRX_CMDC_XACT_WR57 0x1924 +#define EARCRX_CMDC_XACT_WR58 0x1928 +#define EARCRX_CMDC_XACT_WR59 0x192c +#define EARCRX_CMDC_XACT_WR60 0x1930 +#define EARCRX_CMDC_XACT_WR61 0x1934 +#define EARCRX_CMDC_XACT_WR62 0x1938 +#define EARCRX_CMDC_XACT_WR63 0x193c +#define EARCRX_CMDC_XACT_WR64 0x1940 +#define EARCRX_CMDC_XACT_RD0 0x1960 +#define EARCRX_CMDC_XACT_RD1 0x1964 +#define EARCRX_CMDC_XACT_RD2 0x1968 +#define EARCRX_CMDC_XACT_RD3 0x196c +#define EARCRX_CMDC_XACT_RD4 0x1970 +#define EARCRX_CMDC_XACT_RD5 0x1974 +#define EARCRX_CMDC_XACT_RD6 0x1978 +#define EARCRX_CMDC_XACT_RD7 0x197c +#define EARCRX_CMDC_XACT_RD8 0x1980 +#define EARCRX_CMDC_XACT_RD9 0x1984 +#define EARCRX_CMDC_XACT_RD10 0x1988 +#define EARCRX_CMDC_XACT_RD11 0x198c +#define EARCRX_CMDC_XACT_RD12 0x1990 +#define EARCRX_CMDC_XACT_RD13 0x1994 +#define EARCRX_CMDC_XACT_RD14 0x1998 +#define EARCRX_CMDC_XACT_RD15 0x199c +#define EARCRX_CMDC_XACT_RD16 0x19a0 
+#define EARCRX_CMDC_XACT_RD17 0x19a4 +#define EARCRX_CMDC_XACT_RD18 0x19a8 +#define EARCRX_CMDC_XACT_RD19 0x19ac +#define EARCRX_CMDC_XACT_RD20 0x19b0 +#define EARCRX_CMDC_XACT_RD21 0x19b4 +#define EARCRX_CMDC_XACT_RD22 0x19b8 +#define EARCRX_CMDC_XACT_RD23 0x19bc +#define EARCRX_CMDC_XACT_RD24 0x19c0 +#define EARCRX_CMDC_XACT_RD25 0x19c4 +#define EARCRX_CMDC_XACT_RD26 0x19c8 +#define EARCRX_CMDC_XACT_RD27 0x19cc +#define EARCRX_CMDC_XACT_RD28 0x19d0 +#define EARCRX_CMDC_XACT_RD29 0x19d4 +#define EARCRX_CMDC_XACT_RD30 0x19d8 +#define EARCRX_CMDC_XACT_RD31 0x19dc +#define EARCRX_CMDC_XACT_RD32 0x19e0 +#define EARCRX_CMDC_XACT_RD33 0x19e4 +#define EARCRX_CMDC_XACT_RD34 0x19e8 +#define EARCRX_CMDC_XACT_RD35 0x19ec +#define EARCRX_CMDC_XACT_RD36 0x19f0 +#define EARCRX_CMDC_XACT_RD37 0x19f4 +#define EARCRX_CMDC_XACT_RD38 0x19f8 +#define EARCRX_CMDC_XACT_RD39 0x19fc +#define EARCRX_CMDC_XACT_RD40 0x1a00 +#define EARCRX_CMDC_XACT_RD41 0x1a04 +#define EARCRX_CMDC_XACT_RD42 0x1a08 +#define EARCRX_CMDC_XACT_RD43 0x1a0c +#define EARCRX_CMDC_XACT_RD44 0x1a10 +#define EARCRX_CMDC_XACT_RD45 0x1a14 +#define EARCRX_CMDC_XACT_RD46 0x1a18 +#define EARCRX_CMDC_XACT_RD47 0x1a1c +#define EARCRX_CMDC_XACT_RD48 0x1a20 +#define EARCRX_CMDC_XACT_RD49 0x1a24 +#define EARCRX_CMDC_XACT_RD50 0x1a28 +#define EARCRX_CMDC_XACT_RD51 0x1a2c +#define EARCRX_CMDC_XACT_RD52 0x1a30 +#define EARCRX_CMDC_XACT_RD53 0x1a34 +#define EARCRX_CMDC_XACT_RD54 0x1a38 +#define EARCRX_CMDC_XACT_RD55 0x1a3c +#define EARCRX_CMDC_XACT_RD56 0x1a40 +#define EARCRX_CMDC_XACT_RD57 0x1a44 +#define EARCRX_CMDC_XACT_RD58 0x1a48 +#define EARCRX_CMDC_XACT_RD59 0x1a4c +#define EARCRX_CMDC_XACT_RD60 0x1a50 +#define EARCRX_CMDC_XACT_RD61 0x1a54 +#define EARCRX_CMDC_XACT_RD62 0x1a58 +#define EARCRX_CMDC_XACT_RD63 0x1a5c +#define EARCRX_CMDC_XACT_RD64 0x1a60 +#define EARCRX_CMDC_SYNC_CONFIG 0x1b00 +/* eARC RX DMAC Registers */ +#define EARCRX_DMAC_PHY_CONTROL 0x1c00 +#define EARCRX_DMAC_CONFIG 0x1c08 +#define EARCRX_DMAC_CONTROL0 0x1c0c +#define EARCRX_DMAC_AUDIO_EN BIT(1) +#define EARCRX_DMAC_EN BIT(0) +#define EARCRX_DMAC_CONTROL1 0x1c10 +#define EARCRX_DMAC_STATUS 0x1c14 +#define EARCRX_DMAC_CHSTATUS0 0x1c18 +#define EARCRX_DMAC_CHSTATUS1 0x1c1c +#define EARCRX_DMAC_CHSTATUS2 0x1c20 +#define EARCRX_DMAC_CHSTATUS3 0x1c24 +#define EARCRX_DMAC_CHSTATUS4 0x1c28 +#define EARCRX_DMAC_CHSTATUS5 0x1c2c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84 +#define 
EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8 +#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec +#define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc +#define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38 +#define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c +#define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40 +#define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44 +#define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48 +#define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c +#define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50 +#define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54 +#define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58 +#define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c +#define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60 +#define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64 +#define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68 +#define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c +#define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70 +#define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74 +#define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78 +#define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c +#define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80 +/* Main Unit Interrupt Registers */ +#define MAIN_INTVEC_INDEX 0x3000 +#define MAINUNIT_0_INT_STATUS 0x3010 +#define MAINUNIT_0_INT_MASK_N 0x3014 +#define MAINUNIT_0_INT_CLEAR 0x3018 +#define MAINUNIT_0_INT_FORCE 0x301c +#define MAINUNIT_1_INT_STATUS 0x3020 +#define FLT_EXIT_TO_LTSL_IRQ BIT(22) +#define FLT_EXIT_TO_LTS4_IRQ BIT(21) +#define FLT_EXIT_TO_LTSP_IRQ BIT(20) 
+#define SCDC_NACK_RCVD_IRQ BIT(12) +#define SCDC_RR_REPLY_STOP_IRQ BIT(11) +#define SCDC_UPD_FLAGS_CLR_IRQ BIT(10) +#define SCDC_UPD_FLAGS_CHG_IRQ BIT(9) +#define SCDC_UPD_FLAGS_RD_IRQ BIT(8) +#define I2CM_NACK_RCVD_IRQ BIT(2) +#define I2CM_READ_REQUEST_IRQ BIT(1) +#define I2CM_OP_DONE_IRQ BIT(0) +#define MAINUNIT_1_INT_MASK_N 0x3024 +#define I2CM_NACK_RCVD_MASK_N BIT(2) +#define I2CM_READ_REQUEST_MASK_N BIT(1) +#define I2CM_OP_DONE_MASK_N BIT(0) +#define MAINUNIT_1_INT_CLEAR 0x3028 +#define I2CM_NACK_RCVD_CLEAR BIT(2) +#define I2CM_READ_REQUEST_CLEAR BIT(1) +#define I2CM_OP_DONE_CLEAR BIT(0) +#define MAINUNIT_1_INT_FORCE 0x302c +/* AVPUNIT Interrupt Registers */ +#define AVP_INTVEC_INDEX 0x3800 +#define AVP_0_INT_STATUS 0x3810 +#define AVP_0_INT_MASK_N 0x3814 +#define AVP_0_INT_CLEAR 0x3818 +#define AVP_0_INT_FORCE 0x381c +#define AVP_1_INT_STATUS 0x3820 +#define AVP_1_INT_MASK_N 0x3824 +#define HDCP14_AUTH_CHG_MASK_N BIT(6) +#define AVP_1_INT_CLEAR 0x3828 +#define AVP_1_INT_FORCE 0x382c +#define AVP_2_INT_STATUS 0x3830 +#define AVP_2_INT_MASK_N 0x3834 +#define AVP_2_INT_CLEAR 0x3838 +#define AVP_2_INT_FORCE 0x383c +#define AVP_3_INT_STATUS 0x3840 +#define AVP_3_INT_MASK_N 0x3844 +#define AVP_3_INT_CLEAR 0x3848 +#define AVP_3_INT_FORCE 0x384c +#define AVP_4_INT_STATUS 0x3850 +#define AVP_4_INT_MASK_N 0x3854 +#define AVP_4_INT_CLEAR 0x3858 +#define AVP_4_INT_FORCE 0x385c +#define AVP_5_INT_STATUS 0x3860 +#define AVP_5_INT_MASK_N 0x3864 +#define AVP_5_INT_CLEAR 0x3868 +#define AVP_5_INT_FORCE 0x386c +#define AVP_6_INT_STATUS 0x3870 +#define AVP_6_INT_MASK_N 0x3874 +#define AVP_6_INT_CLEAR 0x3878 +#define AVP_6_INT_FORCE 0x387c +/* CEC Interrupt Registers */ +#define CEC_INT_STATUS 0x4000 +#define CEC_INT_MASK_N 0x4004 +#define CEC_INT_CLEAR 0x4008 +#define CEC_INT_FORCE 0x400c +/* eARC RX Interrupt Registers */ +#define EARCRX_INTVEC_INDEX 0x4800 +#define EARCRX_0_INT_STATUS 0x4810 +#define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9) +#define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8) +#define EARCRX_0_INT_MASK_N 0x4814 +#define EARCRX_0_INT_CLEAR 0x4818 +#define EARCRX_0_INT_FORCE 0x481c +#define EARCRX_1_INT_STATUS 0x4820 +#define EARCRX_1_INT_MASK_N 0x4824 +#define EARCRX_1_INT_CLEAR 0x4828 +#define EARCRX_1_INT_FORCE 0x482c + +#endif /* __DW_HDMI_QP_H__ */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0031f3c54882..996733ed2c00 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3503,6 +3503,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.of_node = pdev->dev.of_node; hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + if (hdmi->version >= 0x200a) + hdmi->bridge.ycbcr_420_allowed = plat_data->ycbcr_420_allowed; + memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = dev; pdevinfo.id = PLATFORM_DEVID_AUTO; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index f3afdab55c11..7275e66faefc 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1707,13 +1707,20 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge, { struct tc_data *tc = bridge_to_tc(bridge); - drm_mode_copy(&tc->mode, mode); + drm_mode_copy(&tc->mode, adj); } static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: 
%d\n", ret); + return NULL; + } return drm_edid_read_ddc(connector, &tc->aux.ddc); } @@ -2169,19 +2176,31 @@ static const struct regmap_access_table tc_precious_table = { .n_yes_ranges = ARRAY_SIZE(tc_precious_ranges), }; -static const struct regmap_range tc_non_writeable_ranges[] = { - regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI), - regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI), - regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS), - regmap_reg_range(TC_IDREG, SYSSTAT), - regmap_reg_range(GPIOI, GPIOI), - regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ), -}; - -static const struct regmap_access_table tc_writeable_table = { - .no_ranges = tc_non_writeable_ranges, - .n_no_ranges = ARRAY_SIZE(tc_non_writeable_ranges), -}; +static bool tc_writeable_reg(struct device *dev, unsigned int reg) +{ + /* RO reg */ + switch (reg) { + case PPI_BUSYPPI: + case DSI_BUSYDSI: + case DSI_LANESTATUS0: + case DSI_LANESTATUS1: + case DSI_INTSTATUS: + case TC_IDREG: + case SYSBOOT: + case SYSSTAT: + case GPIOI: + case DP0_LTSTAT: + case DP0_SNKLTCHGREQ: + return false; + } + /* WO reg */ + switch (reg) { + case DSI_STARTDSI: + case DSI_INTCLR: + return true; + } + return tc_readable_reg(dev, reg); +} static const struct regmap_config tc_regmap_config = { .name = "tc358767", @@ -2191,9 +2210,9 @@ static const struct regmap_config tc_regmap_config = { .max_register = PLL_DBG, .cache_type = REGCACHE_MAPLE, .readable_reg = tc_readable_reg, + .writeable_reg = tc_writeable_reg, .volatile_table = &tc_volatile_table, .precious_table = &tc_precious_table, - .wr_table = &tc_writeable_table, .reg_format_endian = REGMAP_ENDIAN_BIG, .val_format_endian = REGMAP_ENDIAN_LITTLE, }; @@ -2229,11 +2248,11 @@ static irqreturn_t tc_irq_handler(int irq, void *arg) bool h = val & INT_GPIO_H(tc->hpd_pin); bool lc = val & INT_GPIO_LC(tc->hpd_pin); - dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin, - h ? "H" : "", lc ? "LC" : ""); - - if (h || lc) + if (h || lc) { + dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin, + h ? "H" : "", lc ? "LC" : ""); drm_kms_helper_hotplug_event(tc->bridge.dev); + } } regmap_write(tc->regmap, INTSTS_G, val); @@ -2298,7 +2317,8 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc) /* port@1 is the DPI input/output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge); if (ret && ret != -ENODEV) - return ret; + return dev_err_probe(dev, ret, + "Could not find DPI panel or bridge\n"); if (panel) { bridge = devm_drm_panel_bridge_add(dev, panel); @@ -2326,7 +2346,8 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL); if (ret && ret != -ENODEV) - return ret; + return dev_err_probe(dev, ret, + "Could not find eDP panel or bridge\n"); if (panel) { struct drm_bridge *panel_bridge; @@ -2551,7 +2572,7 @@ static int tc_probe(struct i2c_client *client) ret = tc_mipi_dsi_host_attach(tc); if (ret) { drm_bridge_remove(&tc->bridge); - return ret; + return dev_err_probe(dev, ret, "Failed to attach DSI host\n"); } } diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index bb1750a3dab0..2cb748bbefcd 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -461,7 +461,9 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host, ret = -EINVAL; ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0); if (ep) { - ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); + ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines); + if (ret) + ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines); of_node_put(ep); } diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index 6b559e071301..a0a1b5dd794e 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -94,7 +94,7 @@ static const struct regmap_access_table dlpc_volatile_table = { .n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges), }; -static struct regmap_config dlpc_regmap_config = { +static const struct regmap_config dlpc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = WR_DSI_PORT_EN, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 582cf4f73a74..9e31f750fd88 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1635,8 +1635,8 @@ static void ti_sn_pwm_unregister(void) } #else -static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } -static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} +static inline int __maybe_unused ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } +static inline void __maybe_unused ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} static inline int ti_sn_pwm_register(void) { return 0; } static inline void ti_sn_pwm_unregister(void) {} diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c new file mode 100644 index 000000000000..3472ed5924e8 --- /dev/null +++ b/drivers/gpu/drm/bridge/ti-tdp158.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2024 Freebox SAS + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_of.h> + +struct tdp158 { + struct drm_bridge bridge; + struct drm_bridge *next; + struct gpio_desc *enable; // Operation Enable - pin 36 + struct regulator *vcc; // 3.3V + struct regulator *vdd; // 1.1V + struct device *dev; +};
+ +static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + int err; + struct tdp158 *tdp158 = bridge->driver_private; + + err = regulator_enable(tdp158->vcc); + if (err) + dev_err(tdp158->dev, "failed to enable vcc: %d\n", err); + + err = regulator_enable(tdp158->vdd); + if (err) + dev_err(tdp158->dev, "failed to enable vdd: %d\n", err); + + gpiod_set_value_cansleep(tdp158->enable, 1); +} + +static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + gpiod_set_value_cansleep(tdp158->enable, 0); + regulator_disable(tdp158->vdd); + regulator_disable(tdp158->vcc); +} + +static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags); +} + +static const struct drm_bridge_funcs tdp158_bridge_funcs = { + .attach = tdp158_attach, + .atomic_enable = tdp158_enable, + .atomic_disable = tdp158_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int tdp158_probe(struct i2c_client *client) +{ + struct tdp158 *tdp158; + struct device *dev = &client->dev; + + tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL); + if (!tdp158) + return -ENOMEM; + + tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(tdp158->next)) + return dev_err_probe(dev, PTR_ERR(tdp158->next), "missing bridge"); + + tdp158->vcc = devm_regulator_get(dev, "vcc"); + if (IS_ERR(tdp158->vcc)) + return dev_err_probe(dev, PTR_ERR(tdp158->vcc), "vcc"); + + tdp158->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(tdp158->vdd)) + return dev_err_probe(dev, PTR_ERR(tdp158->vdd), "vdd"); + + tdp158->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(tdp158->enable)) + return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable"); + + tdp158->bridge.of_node = dev->of_node; + tdp158->bridge.funcs = &tdp158_bridge_funcs; + tdp158->bridge.driver_private = tdp158; + tdp158->dev = dev; + + return devm_drm_bridge_add(dev, &tdp158->bridge); +} + +static const struct of_device_id tdp158_match_table[] = { + { .compatible = "ti,tdp158" }, + { } +}; +MODULE_DEVICE_TABLE(of, tdp158_match_table); + +static struct i2c_driver tdp158_driver = { + .probe = tdp158_probe, + .driver = { + .name = "tdp158", + .of_match_table = tdp158_match_table, + }, +}; +module_i2c_driver(tdp158_driver); + +MODULE_DESCRIPTION("TI TDP158 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index 66e70ced796f..a8fca079921b 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -90,7 +90,12 @@ CONFIG_QCOM_GPI_DMA=y CONFIG_USB_ONBOARD_DEV=y CONFIG_NVMEM_QCOM_QFPROM=y CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y - +CONFIG_REGULATOR_QCOM_REFGEN=y +CONFIG_TYPEC_MUX_FSA4480=y +CONFIG_QCOM_PMIC_GLINK=y +CONFIG_UCSI_PMIC_GLINK=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y # db410c ethernet CONFIG_USB_RTL8152=y diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh index 5a3bdcffae32..139b81db6312 100644 --- a/drivers/gpu/drm/ci/build.sh +++ b/drivers/gpu/drm/ci/build.sh @@ -30,6 +30,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb" DEVICE_TREES+=" 
arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb" + DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb" elif [[ "$KERNEL_ARCH" = "arm" ]]; then GCC_ARCH="arm-linux-gnueabihf" DEBIAN_ARCH="armhf" diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index eca47d4f816f..90bde9f00cc3 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -1,14 +1,14 @@ variables: DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa - DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de + DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6 UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git TARGET_BRANCH: drm-next - IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5 + IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git - DEQP_RUNNER_GIT_TAG: v0.15.0 + DEQP_RUNNER_GIT_TAG: v0.20.0 FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb @@ -153,6 +153,14 @@ stages: # Pre-merge pipeline for Marge Bot - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"' when: on_success + # Push to a branch on a fork + - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"' + +# Rules applied to every job in the pipeline +.common-rules: + rules: + - if: *is-fork-push + when: manual .never-post-merge-rules: rules: diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml index 2c340d063a96..8d8b9e71852e 100644 --- a/drivers/gpu/drm/ci/image-tags.yml +++ b/drivers/gpu/drm/ci/image-tags.yml @@ -1,5 +1,5 @@ variables: - CONTAINER_TAG: "2024-08-07-mesa-uprev" + CONTAINER_TAG: "2024-09-09-uprevs" DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base" DEBIAN_BASE_TAG: "${CONTAINER_TAG}" diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 09d8447840e9..f0ef60c8f56d 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -162,6 +162,22 @@ msm:sdm845: script: - ./install/bare-metal/cros-servo.sh +msm:sm8350-hdk: + extends: + - .lava-igt:arm64 + stage: msm + parallel: 4 + variables: + BOOT_METHOD: fastboot + DEVICE_TYPE: sm8350-hdk + DRIVER_NAME: msm + DTB: ${DEVICE_TYPE} + FARM: collabora + GPU_VERSION: ${DEVICE_TYPE} + KERNEL_IMAGE_NAME: "Image.gz" + KERNEL_IMAGE_TYPE: "" + RUNNER_TAG: mesa-ci-x86-64-lava-sm8350-hdk + .rockchip-device: variables: DTB: ${DEVICE_TYPE} @@ -286,6 +302,15 @@ i915:tgl: GPU_VERSION: tgl RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer +i915:jsl: + extends: + - .i915 + parallel: 4 + variables: + DEVICE_TYPE: acer-cb317-1h-c3z6-dedede + GPU_VERSION: jsl + RUNNER_TAG: mesa-ci-x86-64-lava-acer-cb317-1h-c3z6-dedede + .amdgpu: extends: - .lava-igt:x86_64 diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt index 8e2fed6d76a3..f44dbce3151a 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt @@ -2,6 +2,7 @@ amdgpu/amd_abm@abm_enabled,Fail amdgpu/amd_abm@abm_gradual,Fail amdgpu/amd_abm@backlight_monotonic_abm,Fail amdgpu/amd_abm@backlight_monotonic_basic,Fail +amdgpu/amd_abm@dpms_cycle,Fail amdgpu/amd_assr@assr-links,Fail 
amdgpu/amd_assr@assr-links-dpms,Fail amdgpu/amd_mall@static-screen,Crash @@ -14,7 +15,6 @@ amdgpu/amd_plane@mpo-scale-p010,Fail amdgpu/amd_plane@mpo-scale-rgb,Crash amdgpu/amd_plane@mpo-swizzle-toggle,Fail amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail -dumb_buffer@invalid-bpp,Fail kms_addfb_basic@bad-pitch-65536,Fail kms_addfb_basic@bo-too-small,Fail kms_addfb_basic@too-high,Fail diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt index e4faa96fa000..e70bd9d447ca 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt @@ -18,3 +18,10 @@ kms_async_flips@crc # IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 kms_plane@pixel-format-source-clamping + +# Board Name: hp-11A-G6-EE-grunt +# Bug Report: https://lore.kernel.org/amd-gfx/09ee1862-3a0e-4085-ac1b-262601b1122b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_async_flips@async-flip-with-page-flip-events diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt index 9b84f68a5122..0907cb0f6d9e 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt @@ -1,3 +1,4 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail core_setmaster_vs_auth,Fail i915_module_load@load,Fail @@ -6,7 +7,6 @@ i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_fb_coherency@memset-crc,Crash kms_flip@busy-flip,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt index 581f0da4d0f2..0207c9807bee 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt @@ -46,3 +46,10 @@ i915_hangman@engine-engine-hang # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_pm_rpm@modeset-lpsp-stress + +# Board Name: asus-C433TA-AJ0005-rammus +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_pm_rpm@drm-resources-equal diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt index e612281149aa..64772fedaed5 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt @@ -8,7 +8,6 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail -kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt index 4663d4d13f35..e8bddda56737 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt @@ -4,3 +4,10 @@ 
# IGT Version: 1.28-g0df7b9b97 # Linux Version: 6.9.0-rc7 kms_fb_coherency@memset-crc + +# Board Name: asus-C523NA-A20057-coral +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_universal_plane@cursor-fb-leak diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt index 2723e2832797..f352b719cf7d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt @@ -1,5 +1,5 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail -core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -9,10 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +i915_pm_rps@engine-order,Fail +kms_big_fb@linear-16bpp-rotate-180,Timeout kms_fb_coherency@memset-crc,Crash kms_flip@busy-flip,Timeout -kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail @@ -40,14 +40,11 @@ kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout -kms_prop_blob@invalid-set-prop,Fail kms_psr2_sf@cursor-plane-update-sf,Fail -kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout kms_psr2_sf@overlay-plane-update-continuous-sf,Fail kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail @@ -55,7 +52,6 @@ kms_psr2_sf@plane-move-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail kms_psr2_su@page_flip-NV12,Fail -kms_psr2_su@page_flip-P010,Fail kms_rotation_crc@primary-rotation-180,Timeout kms_setmode@basic,Fail kms_vblank@query-forked-hang,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt index 58a6001abb28..d8401251e5f4 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt @@ -11,3 +11,17 @@ kms_plane_alpha_blend@constant-alpha-min # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_atomic_transition@plane-all-modeset-transition-internal-panels + +# Board Name: asus-C436FA-Flip-hatch +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_plane_alpha_blend@constant-alpha-min + +# Board Name: asus-C436FA-Flip-hatch +# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_async_flips@crc diff --git 
a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt index 4821c9adefd1..6eb64c672f7d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt @@ -63,3 +63,4 @@ xe_module_load@load,Fail xe_module_load@many-reload,Fail xe_module_load@reload,Fail xe_module_load@reload-no-display,Fail +core_setmaster@master-drop-set-shared-fd,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt new file mode 100644 index 000000000000..ed9f7b576843 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt @@ -0,0 +1,51 @@ +core_setmaster@master-drop-set-user,Fail +i915_module_load@load,Fail +i915_module_load@reload,Fail +i915_module_load@reload-no-display,Fail +i915_module_load@resize-bar,Fail +i915_pm_rpm@gem-execbuf-stress,Timeout +i915_pm_rpm@module-reload,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_lease@lease-uevent,Fail +kms_pm_rpm@legacy-planes,Timeout +kms_pm_rpm@legacy-planes-dpms,Timeout +kms_pm_rpm@modeset-stress-extra-wait,Timeout +kms_pm_rpm@universal-planes,Timeout +kms_pm_rpm@universal-planes-dpms,Timeout +kms_rotation_crc@multiplane-rotation,Fail +kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail +kms_rotation_crc@multiplane-rotation-cropping-top,Fail +perf@i915-ref-count,Fail +perf_pmu@busy-accuracy-50,Fail +perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Fail +perf_pmu@rc6,Crash +sysfs_heartbeat_interval@long,Timeout +sysfs_heartbeat_interval@off,Timeout +sysfs_preempt_timeout@off,Timeout +sysfs_timeslice_duration@off,Timeout +xe_module_load@force-load,Fail +xe_module_load@load,Fail +xe_module_load@many-reload,Fail +xe_module_load@reload,Fail +xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt new file mode 100644 index 000000000000..5c3ef4486b9d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt @@ -0,0 +1,13 @@ +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: 
https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_flip@flip-vs-panning-interruptible + +# Board Name: acer-cb317-1h-c3z6-dedede +# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +kms_universal_plane@cursor-fb-leak diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt new file mode 100644 index 000000000000..1a3d87c0ca6e --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt @@ -0,0 +1,20 @@ +# Suspend to RAM seems to be broken on this machine +.*suspend.* + +# Skip driver specific tests +^amdgpu.* +^msm.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# GEM tests takes ~1000 hours, so skip it +gem_.* + +# trap_err +i915_pm_rc6_residency.* + +# Hangs the machine and timeout occurs +i915_pm_rpm@system-hibernate* diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt index 1de04a3308c4..d4fba4f55ec1 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt @@ -17,12 +17,10 @@ perf@i915-ref-count,Fail perf_pmu@busy-accuracy-50,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash -prime_busy@after,Fail sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -testdisplay,Timeout xe_module_load@force-load,Fail xe_module_load@load,Fail xe_module_load@many-reload,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt index e728ccc62326..461ef69ef08a 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt @@ -1,40 +1,64 @@ +api_intel_allocator@fork-simple-stress-signal,Timeout +api_intel_allocator@open-vm,Timeout api_intel_allocator@simple-allocator,Timeout +api_intel_bb@lot-of-buffers,Timeout api_intel_bb@object-reloc-keep-cache,Timeout api_intel_bb@offset-control,Timeout -core_auth@getclient-simple,Timeout -core_hotunplug@hotunbind-rebind,Timeout +api_intel_bb@render-ccs,Timeout +api_intel_bb@reset-bb,Timeout +core_auth@basic-auth,Timeout +core_hotunplug@hotrebind,Timeout +core_setmaster@master-drop-set-user,Fail debugfs_test@read_all_entries_display_on,Timeout -drm_read@invalid-buffer,Timeout -drm_read@short-buffer-nonblock,Timeout +drm_read@empty-block,Timeout +dumb_buffer@create-clear,Timeout +dumb_buffer@invalid-bpp,Timeout gen3_render_tiledx_blits,Timeout gen7_exec_parse@basic-allocation,Timeout -gen7_exec_parse@batch-without-end,Timeout gen9_exec_parse@batch-invalid-length,Timeout gen9_exec_parse@bb-secure,Timeout gen9_exec_parse@secure-batches,Timeout gen9_exec_parse@shadow-peek,Timeout gen9_exec_parse@unaligned-jump,Timeout +i915_getparams_basic@basic-subslice-total,Timeout +i915_hangman@gt-engine-hang,Timeout i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail +i915_pciid,Timeout +i915_pipe_stress@stress-xrgb8888-ytiled,Timeout +i915_pm_rpm@gem-execbuf-stress,Timeout +i915_pm_rps@engine-order,Timeout +i915_pm_rps@thresholds-idle-park,Timeout i915_query@engine-info,Timeout i915_query@query-topology-kernel-writes,Timeout i915_query@test-query-geometry-subslices,Timeout kms_lease@lease-uevent,Fail kms_rotation_crc@multiplane-rotation,Fail 
perf@i915-ref-count,Fail +perf_pmu@busy,Timeout perf_pmu@enable-race,Timeout perf_pmu@event-wait,Timeout +perf_pmu@faulting-read,Timeout perf_pmu@gt-awake,Timeout perf_pmu@interrupts,Timeout perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Timeout perf_pmu@rc6,Crash +perf_pmu@render-node-busy-idle,Fail +perf_pmu@semaphore-wait-idle,Timeout +prime_busy@after,Timeout +prime_mmap@test_aperture_limit,Timeout prime_mmap@test_map_unmap,Timeout prime_mmap@test_refcounting,Timeout prime_self_import@basic-with_one_bo,Timeout +sriov_basic@enable-vfs-autoprobe-off,Timeout +syncobj_basic@bad-destroy,Timeout syncobj_basic@bad-flags-fd-to-handle,Timeout +syncobj_basic@create-signaled,Timeout syncobj_eventfd@invalid-bad-pad,Timeout +syncobj_eventfd@timeline-wait-before-signal,Timeout syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout syncobj_wait@invalid-signal-illegal-handle,Timeout syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt index 2adae2175501..0ce240e3aa07 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt @@ -1,5 +1,5 @@ +core_setmaster@master-drop-set-shared-fd,Fail core_setmaster@master-drop-set-user,Fail -core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -7,7 +7,8 @@ i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout +i915_pm_rps@engine-order,Fail +kms_big_fb@linear-16bpp-rotate-180,Timeout kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout kms_dirtyfb@default-dirtyfb-ioctl,Fail kms_dirtyfb@fbc-dirtyfb-ioctl,Fail @@ -32,19 +33,17 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail -kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout kms_frontbuffer_tracking@fbc-tiling-linear,Fail +kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail -kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_pm_rpm@universal-planes,Timeout kms_pm_rpm@universal-planes-dpms,Timeout -kms_prop_blob@invalid-set-prop,Fail kms_rotation_crc@primary-rotation-180,Timeout kms_vblank@query-forked-hang,Timeout perf@i915-ref-count,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt index a14349a1967f..8e0efc80d510 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail fbdev@eof,Fail fbdev@read,Fail kms_3d,Fail @@ -27,10 +22,6 @@ kms_cursor_legacy@cursor-vs-flip-atomic,Fail 
kms_cursor_legacy@cursor-vs-flip-legacy,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail -kms_flip@flip-vs-suspend,Fail -kms_flip@flip-vs-suspend-interruptible,Fail kms_lease@lease-uevent,Fail -kms_properties@get_properties-sanity-atomic,Fail -kms_properties@plane-properties-atomic,Fail -kms_properties@plane-properties-legacy,Fail kms_rmfb@close-fd,Fail +kms_flip@flip-vs-suspend-interruptible,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt index 8cb2cb67853d..845f852bb4a0 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt @@ -1,10 +1,5 @@ core_setmaster@master-drop-set-shared-fd,Fail -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Fail fbdev@eof,Fail fbdev@pan,Fail fbdev@read,Fail @@ -18,5 +13,4 @@ kms_color@invalid-gamma-lut-sizes,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-suspend,Fail kms_lease@lease-uevent,Fail -kms_properties@plane-properties-atomic,Fail kms_rmfb@close-fd,Fail diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt index 328967d3e23d..fc3745180683 100644 --- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt +++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt @@ -1,4 +1,3 @@ -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_cursor_legacy@forked-bo,Fail kms_cursor_legacy@forked-move,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt index 4ac46168eff3..066d24ee3e08 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_cursor_legacy@torture-bo,Fail kms_force_connector_basic@force-edid,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt index bd0653caf7a0..2893f98a6b97 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt @@ -1,7 +1,2 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_3d,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt index d42004cd6977..6dbc2080347d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail @@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash 
-kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash -kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail kms_cursor_legacy@cursor-vs-flip-toggle,Fail kms_cursor_legacy@cursor-vs-flip-varying-size,Fail -kms_display_modes@extended-mode-basic,Fail -kms_flip@2x-flip-vs-modeset-vs-hang,Fail -kms_flip@2x-flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_lease@lease-uevent,Fail -kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_lowres@tiling-none,Fail kms_rmfb@close-fd,Fail -kms_vblank@ts-continuation-dpms-rpm,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt index d42004cd6977..6dbc2080347d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt @@ -1,8 +1,3 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail @@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail kms_color@ctm-negative,Fail kms_color@ctm-red-to-blue,Fail kms_color@ctm-signed,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash -kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash -kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail -kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail -kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail kms_cursor_legacy@cursor-vs-flip-toggle,Fail kms_cursor_legacy@cursor-vs-flip-varying-size,Fail -kms_display_modes@extended-mode-basic,Fail -kms_flip@2x-flip-vs-modeset-vs-hang,Fail -kms_flip@2x-flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-modeset-vs-hang,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_lease@lease-uevent,Fail -kms_multipipe_modeset@basic-max-pipe-crc-check,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail kms_plane_alpha_blend@alpha-7efc,Fail kms_plane_alpha_blend@coverage-7efc,Fail kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail -kms_plane_lowres@tiling-none,Fail kms_rmfb@close-fd,Fail -kms_vblank@ts-continuation-dpms-rpm,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt index 
770a1c685fde..fa8c7e663858 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt @@ -1,8 +1,4 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail +drm_read@invalid-buffer,Fail kms_color@ctm-0-25,Fail kms_color@ctm-0-50,Fail kms_color@ctm-0-75,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt index 2aa96b1241c3..38ec0305c1f4 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt @@ -116,3 +116,17 @@ kms_cursor_legacy@flip-vs-cursor-toggle # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 msm/msm_shrink@copy-mmap-oom-8 + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_lease@page-flip-implicit-plane + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@flip-vs-expired-vblank diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt index 90651048ab61..94783cafc21a 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt @@ -25,3 +25,8 @@ core_hotunplug.* # Whole machine hangs kms_cursor_crc.* + +# IGT test crash +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_content_protection@uevent diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt new file mode 100644 index 000000000000..4892c0c70a6d --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt @@ -0,0 +1,15 @@ +kms_3d,Fail +kms_cursor_legacy@forked-bo,Fail +kms_cursor_legacy@forked-move,Fail +kms_cursor_legacy@single-bo,Fail +kms_cursor_legacy@single-move,Fail +kms_cursor_legacy@torture-bo,Fail +kms_cursor_legacy@torture-move,Fail +kms_hdmi_inject@inject-4k,Fail +kms_lease@lease-uevent,Fail +kms_plane_alpha_blend@alpha-7efc,Fail +kms_plane_alpha_blend@alpha-basic,Fail +kms_plane_alpha_blend@alpha-opaque-fb,Fail +kms_plane_alpha_blend@alpha-transparent-fb,Fail +kms_plane_alpha_blend@constant-alpha-max,Fail +msm/msm_recovery@gpu-fault-parallel,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt new file mode 100644 index 000000000000..c1859d9b165f --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt @@ -0,0 +1,6 @@ +# Board Name: sm8350-hdk +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/65 +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.12.0-rc1 +msm/msm_recovery@gpu-fault diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt new file mode 100644 index 000000000000..329770c520d9 --- /dev/null +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt @@ -0,0 +1,211 @@ +# Skip driver specific tests +^amdgpu.* +nouveau_.* +^panfrost.* +^v3d.* +^vc4.* +^vmwgfx* + +# Skip intel specific tests +gem_.* +i915_.* +tools_test.* + +# Currently fails and causes coverage loss for other tests +# 
since core_getversion also fails. +core_hotunplug.* + +# Kernel panic +msm/msm_mapping@ring +# DEBUG - Begin test msm/msm_mapping@ring +# [ 200.874157] [IGT] msm_mapping: executing +# [ 200.880236] [IGT] msm_mapping: starting subtest ring +# [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISSION source=CP (0,0,0,1) +# [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.950227] platform 3d6a000.gmu: [drm:a6xx_hfi_send_msg.constprop.0] *ERROR* Message HFI_H2F_MSG_GX_BW_PERF_VOTE id 25 timed out waiting for response +# [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 204.213387] platform 3d6a000.gmu: GMU watchdog expired +# [ 205.909103] adreno_fault_handler: 224274 callbacks suppressed +# [ 205.909108] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.925794] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.936529] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.947263] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.957997] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.968731] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.979465] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 205.990199] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.000932] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 206.011666] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.925090] adreno_fault_handler: 224511 callbacks suppressed +# [ 210.925096] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.941781] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.952517] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.963250] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.973985] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 
dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.984719] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 210.995452] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.006186] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.016921] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 211.027655] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.937100] adreno_fault_handler: 223760 callbacks suppressed +# [ 215.937106] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.953824] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.964573] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.975321] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.986067] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 215.996815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.007563] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.018310] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.029057] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 216.039805] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.945182] adreno_fault_handler: 222822 callbacks suppressed +# [ 220.945188] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.961897] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.972645] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.983392] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 220.994140] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.004889] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.015636] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.026383] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.037130] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 221.047879] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.953179] adreno_fault_handler: 223373 callbacks suppressed +# [ 225.953184] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.969883] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.980617] *** gpu fault: ttbr0=00000001160d6000 
iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 225.991350] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.002084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.012818] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.023551] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.034285] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.045019] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 226.055753] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1) +# [ 228.001087] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks: +# [ 228.007412] rcu: 0-....: (524 ticks this GP) idle=4ffc/1/0x4000000000000000 softirq=9367/9368 fqs=29 +# [ 228.017097] rcu: (detected by 1, t=6504 jiffies, g=29837, q=6 ncpus=8) +# [ 228.023959] Sending NMI from CPU 1 to CPUs 0: +# [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150] +# [ 228.173169] Modules linked in: +# [ 228.176361] irq event stamp: 2809595 +# [ 228.180083] hardirqs last enabled at (2809594): [<ffffd3bc52cb91ac>] exit_to_kernel_mode+0x38/0x130 +# [ 228.189547] hardirqs last disabled at (2809595): [<ffffd3bc52cb92c8>] el1_interrupt+0x24/0x64 +# [ 228.198377] softirqs last enabled at (1669060): [<ffffd3bc51936f98>] handle_softirqs+0x4a4/0x4bc +# [ 228.207565] softirqs last disabled at (1669063): [<ffffd3bc518905a4>] __do_softirq+0x14/0x20 +# [ 228.216316] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Not tainted 6.12.0-rc1-g685d530dc83a #1 +# [ 228.224966] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.231730] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +# [ 228.238948] pc : tcp_fastretrans_alert+0x0/0x884 +# [ 228.243751] lr : tcp_ack+0x9d4/0x1238 +# [ 228.247562] sp : ffff8000800036d0 +# [ 228.251011] x29: ffff8000800036d0 x28: 000000000000000c x27: 0000000000000001 +# [ 228.258421] x26: ffff704683cd8000 x25: 0000000000000403 x24: ffff70468b7e7c00 +# [ 228.265829] x23: 0000000000000000 x22: 0000000000000004 x21: 000000000000140f +# [ 228.273237] x20: 00000000f1de79f7 x19: 00000000f1de7a5f x18: 0000000000000001 +# [ 228.280644] x17: 00000000302d6762 x16: 632d6b64682d3035 x15: ffff704683c39000 +# [ 228.288051] x14: 00000000000e2000 x13: ffff704683df6000 x12: 0000000000000000 +# [ 228.295458] x11: 00000000000000a0 x10: 0000000000000000 x9 : ffffd3bc551a9a20 +# [ 228.302865] x8 : ffff800080003640 x7 : 0000000000040faa x6 : 00000000ffff9634 +# [ 228.310271] x5 : 00000000000005a8 x4 : ffff800080003788 x3 : ffff80008000377c +# [ 228.317679] x2 : 0000000000000000 x1 : 00000000f1de79f7 x0 : ffff704683cd8000 +# [ 228.325087] Call trace: +# [ 228.327640] tcp_fastretrans_alert+0x0/0x884 +# [ 228.332082] tcp_rcv_established+0x7c4/0x8bc +# [ 228.336523] tcp_v4_do_rcv+0x244/0x31c +# [ 228.340429] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.344155] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.348862] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.353566] ip_local_deliver+0x84/0x254 +# [ 228.357651] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.362092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.366081] ip_list_rcv+0xfc/0x190 +# [ 228.369711] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.375050] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.380564] napi_complete_done+0x64/0x1d0 +# [ 228.384826] lan78xx_poll+0x71c/0x9cc +# [ 228.388638] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.393341] net_rx_action+0x164/0x2d4 +# [ 228.397244] handle_softirqs+0x128/0x4bc +# [ 228.401329] __do_softirq+0x14/0x20 +# [ 228.404958] ____do_softirq+0x10/0x1c +# [ 228.408769] call_on_irq_stack+0x24/0x4c +# [ 228.412854] do_softirq_own_stack+0x1c/0x28 +# [ 228.417199] __irq_exit_rcu+0x124/0x164 +# [ 228.421188] irq_exit_rcu+0x10/0x38 +# [ 228.424819] el1_interrupt+0x38/0x64 +# [ 228.428546] el1h_64_irq_handler+0x18/0x24 +# [ 228.432807] el1h_64_irq+0x64/0x68 +# [ 228.436354] lock_acquire+0x214/0x32c +# [ 228.440166] __mutex_lock+0x98/0x3d0 +# [ 228.443893] mutex_lock_nested+0x24/0x30 +# [ 228.447978] fault_worker+0x58/0x184 +# [ 228.451704] kthread_worker_fn+0xf4/0x320 +# [ 228.455873] kthread+0x114/0x118 +# [ 228.459243] ret_from_fork+0x10/0x20 +# [ 228.462970] Kernel panic - not syncing: softlockup: hung tasks +# [ 228.469018] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Tainted: G L 6.12.0-rc1-g685d530dc83a #1 +# [ 228.479190] Tainted: [L]=SOFTLOCKUP +# [ 228.482815] Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +# [ 228.489574] Call trace: +# [ 228.492125] dump_backtrace+0x98/0xf0 +# [ 228.495931] show_stack+0x18/0x24 +# [ 228.499380] dump_stack_lvl+0x38/0xd0 +# [ 228.503189] dump_stack+0x18/0x24 +# [ 228.506639] panic+0x3bc/0x41c +# [ 228.509826] watchdog_timer_fn+0x254/0x2e4 +# [ 228.514087] __hrtimer_run_queues+0x3b0/0x40c +# [ 228.518612] hrtimer_interrupt+0xe8/0x248 +# [ 228.522777] arch_timer_handler_virt+0x2c/0x44 +# [ 228.527399] handle_percpu_devid_irq+0xa8/0x2c4 +# [ 228.532103] generic_handle_domain_irq+0x2c/0x44 +# [ 228.536902] gic_handle_irq+0x4c/0x11c +# [ 228.540802] do_interrupt_handler+0x50/0x84 +# [ 228.545146] el1_interrupt+0x34/0x64 +# [ 228.548870] el1h_64_irq_handler+0x18/0x24 +# [ 228.553128] el1h_64_irq+0x64/0x68 +# [ 228.556672] tcp_fastretrans_alert+0x0/0x884 +# [ 228.561110] tcp_rcv_established+0x7c4/0x8bc +# [ 228.565548] tcp_v4_do_rcv+0x244/0x31c +# [ 228.569449] tcp_v4_rcv+0xcc4/0x1084 +# [ 228.573171] ip_protocol_deliver_rcu+0x64/0x218 +# [ 228.577873] ip_local_deliver_finish+0xb8/0x1ac +# [ 228.582574] ip_local_deliver+0x84/0x254 +# [ 228.586655] ip_sublist_rcv_finish+0x84/0xb8 +# [ 228.591092] ip_sublist_rcv+0x11c/0x2f0 +# [ 228.595079] ip_list_rcv+0xfc/0x190 +# [ 228.598706] __netif_receive_skb_list_core+0x174/0x208 +# [ 228.604039] netif_receive_skb_list_internal+0x204/0x3ac +# [ 228.609549] napi_complete_done+0x64/0x1d0 +# [ 228.613808] lan78xx_poll+0x71c/0x9cc +# [ 228.617614] __napi_poll.constprop.0+0x3c/0x254 +# [ 228.622314] net_rx_action+0x164/0x2d4 +# [ 228.626214] handle_softirqs+0x128/0x4bc +# [ 228.630297] __do_softirq+0x14/0x20 +# [ 228.633923] ____do_softirq+0x10/0x1c +# [ 228.637729] call_on_irq_stack+0x24/0x4c +# [ 228.641811] do_softirq_own_stack+0x1c/0x28 +# [ 228.646152] __irq_exit_rcu+0x124/0x164 +# [ 228.650139] irq_exit_rcu+0x10/0x38 +# [ 228.653768] el1_interrupt+0x38/0x64 +# [ 228.657491] el1h_64_irq_handler+0x18/0x24 +# [ 228.661750] el1h_64_irq+0x64/0x68 +# [ 228.665293] lock_acquire+0x214/0x32c +# [ 228.669098] __mutex_lock+0x98/0x3d0 +# [ 228.672821] mutex_lock_nested+0x24/0x30 +# [ 228.676903] fault_worker+0x58/0x184 +# [ 228.680626] kthread_worker_fn+0xf4/0x320 +# [ 228.684790] kthread+0x114/0x118 +# [ 228.688156] ret_from_fork+0x10/0x20 +# [ 228.691882] SMP: stopping secondary CPUs +# [ 229.736843] SMP: failed to stop secondary CPUs 1,4 +# [ 229.741827] Kernel Offset: 0x53bbd1880000 from 0xffff800080000000 +# [ 229.748159] PHYS_OFFSET: 0xfff08fba80000000 +# [ 229.752499] CPU features: 0x18,00000017,00200928,4200720b +# [ 229.758095] Memory Limit: none +# [ 229.761291] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]--- diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt 
b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt index 4a2f4b6b14c1..8330b934602a 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Crash +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Crash diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt index fe8ce2ce33e6..abd1ccb71561 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt @@ -1 +1,2 @@ panfrost/panfrost_prime@gem-prime-import,Fail +panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt deleted file mode 100644 index 5e6d48d98e4e..000000000000 --- a/drivers/gpu/drm/ci/xfails/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -git+https://gitlab.freedesktop.org/gfx-ci/ci-collate@09e7142715c16f54344ddf97013331ba063b162b -termcolor==2.3.0 - -# ci-collate dependencies -certifi==2023.7.22 -charset-normalizer==3.2.0 -idna==3.4 -pip==23.3 -python-gitlab==3.15.0 -requests==2.31.0 -requests-toolbelt==1.0.0 -ruamel.yaml==0.17.32 -ruamel.yaml.clib==0.2.7 -setuptools==70.0.0 -tenacity==8.2.3 -urllib3==2.0.7 -wheel==0.41.1 diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt index ea7b2ceb95b9..90282dfa19f4 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt @@ -1,18 +1,24 @@ -core_setmaster@master-drop-set-root,Crash core_setmaster@master-drop-set-user,Crash -core_setmaster_vs_auth,Crash -device_reset@cold-reset-bound,Crash -device_reset@reset-bound,Crash -device_reset@unbind-cold-reset-rebind,Crash -device_reset@unbind-reset-rebind,Crash dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Crash fbdev@pan,Crash +kms_bw@linear-tiling-2-displays-1920x1080p,Fail kms_cursor_crc@cursor-onscreen-32x10,Crash kms_cursor_crc@cursor-onscreen-32x32,Crash +kms_cursor_crc@cursor-onscreen-64x64,Crash kms_cursor_crc@cursor-random-32x10,Crash +kms_cursor_crc@cursor-sliding-32x10,Crash kms_cursor_crc@cursor-sliding-32x32,Crash +kms_cursor_crc@cursor-sliding-64x21,Crash kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash +kms_flip@flip-vs-panning-vs-hang,Crash +kms_invalid_mode@int-max-clock,Crash +kms_lease@invalid-create-leases,Fail +kms_pipe_crc_basic@read-crc-frame-sequence,Crash +kms_plane@pixel-format,Crash +kms_plane@pixel-format-source-clamping,Crash kms_prop_blob@invalid-set-prop,Crash -kms_prop_blob@invalid-set-prop-any,Crash +kms_properties@get_properties-sanity-atomic,Crash +kms_properties@get_properties-sanity-non-atomic,Crash +kms_rmfb@close-fd,Crash diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt index 7ede273aab20..cd0b27d8b636 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt @@ -4,3 +4,31 @@ # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_cursor_legacy@flip-vs-cursor-atomic + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 
+# Linux Version: 6.11.0-rc2 +kms_cursor_crc@cursor-offscreen-32x10 + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_cursor_edge_walk@64x64-left-edge + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_flip@plain-flip-ts-check + +# Board Name: rk3288-veyron-jaq +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 100 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_cursor_crc@cursor-alpha-opaque diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt index 9309ff15e23a..83a38853b4af 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt @@ -1,9 +1,4 @@ -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail dumb_buffer@create-clear,Crash -dumb_buffer@invalid-bpp,Fail kms_atomic_transition@modeset-transition,Fail kms_atomic_transition@modeset-transition-fencing,Fail kms_atomic_transition@plane-toggle-modeset-transition,Fail @@ -46,7 +41,6 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail kms_flip@basic-flip-vs-wf_vblank,Fail kms_flip@blocking-wf_vblank,Fail -kms_flip@dpms-vs-vblank-race,Fail kms_flip@flip-vs-absolute-wf_vblank,Fail kms_flip@flip-vs-blocking-wf-vblank,Fail kms_flip@flip-vs-modeset-vs-hang,Fail @@ -59,7 +53,6 @@ kms_flip@plain-flip-fb-recreate,Fail kms_flip@plain-flip-fb-recreate-interruptible,Fail kms_flip@plain-flip-ts-check,Fail kms_flip@plain-flip-ts-check-interruptible,Fail -kms_flip@wf_vblank-ts-check,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_invalid_mode@int-max-clock,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt index d98f6a17343c..56f7d4f1ed15 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt @@ -46,3 +46,31 @@ kms_setmode@basic # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_bw@connected-linear-tiling-1-displays-2560x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc2 +kms_flip@wf_vblank-ts-check + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@dpms-vs-vblank-race + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_bw@linear-tiling-2-displays-2160x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t +# 
Failure Rate: 50 +# IGT Version: 1.28-ga73311079 +# Linux Version: 6.11.0-rc5 +kms_flip@flip-vs-expired-vblank diff --git a/drivers/gpu/drm/ci/xfails/update-xfails.py b/drivers/gpu/drm/ci/xfails/update-xfails.py deleted file mode 100755 index a446e98d72a1..000000000000 --- a/drivers/gpu/drm/ci/xfails/update-xfails.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -from collections import defaultdict -import difflib -import os -import re -from glcollate import Collate -from termcolor import colored -from urllib.parse import urlparse - - -def get_canonical_name(job_name): - return re.split(r" \d+/\d+", job_name)[0] - - -def get_xfails_file_path(job_name, suffix): - canonical_name = get_canonical_name(job_name) - name = canonical_name.replace(":", "-") - script_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(script_dir, f"{name}-{suffix}.txt") - - -def get_unit_test_name_and_results(unit_test): - if "Artifact results/failures.csv not found" in unit_test or '' == unit_test: - return None, None - unit_test_name, unit_test_result = unit_test.strip().split(",") - return unit_test_name, unit_test_result - - -def read_file(file_path): - try: - with open(file_path, "r") as file: - f = file.readlines() - if len(f): - f[-1] = f[-1].strip() + "\n" - return f - except FileNotFoundError: - return [] - - -def save_file(content, file_path): - # delete file is content is empty - if not content or not any(content): - if os.path.exists(file_path): - os.remove(file_path) - return - - with open(file_path, "w") as file: - file.writelines(content) - - -def is_test_present_on_file(file_content, unit_test_name): - return any(unit_test_name in line for line in file_content) - - -def is_unit_test_present_in_other_jobs(unit_test, job_ids): - return all(unit_test in job_ids[job_id] for job_id in job_ids) - - -def remove_unit_test_if_present(lines, unit_test_name): - if not is_test_present_on_file(lines, unit_test_name): - return - lines[:] = [line for line in lines if unit_test_name not in line] - - -def add_unit_test_if_not_present(lines, unit_test_name, file_name): - # core_getversion is mandatory - if "core_getversion" in unit_test_name: - print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name)) - elif all(unit_test_name not in line for line in lines): - lines.append(unit_test_name + "\n") - - -def update_unit_test_result_in_fails_txt(fails_txt, unit_test): - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - for i, line in enumerate(fails_txt): - if unit_test_name in line: - _, current_result = get_unit_test_name_and_results(line) - fails_txt[i] = unit_test + "\n" - return - - -def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path): - unit_test_name, _ = get_unit_test_name_and_results(unit_test) - if not is_test_present_on_file(fails_txt, unit_test_name): - add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path) - # if it is present but not with the same result - elif not is_test_present_on_file(fails_txt, unit_test): - update_unit_test_result_in_fails_txt(fails_txt, unit_test) - - -def split_unit_test_from_collate(xfails): - for job_name in xfails.keys(): - for job_id in xfails[job_name].copy().keys(): - if "not found" in xfails[job_name][job_id].content_as_str: - del xfails[job_name][job_id] - continue - xfails[job_name][job_id] = xfails[job_name][job_id].content_as_str.splitlines() - - -def get_xfails_from_pipeline_url(pipeline_url): - parsed_url = 
urlparse(pipeline_url) - path_components = parsed_url.path.strip("/").split("/") - - namespace = path_components[0] - project = path_components[1] - pipeline_id = path_components[-1] - - print("Collating from:", namespace, project, pipeline_id) - xfails = ( - Collate(namespace=namespace, project=project) - .from_pipeline(pipeline_id) - .get_artifact("results/failures.csv") - ) - - split_unit_test_from_collate(xfails) - return xfails - - -def get_xfails_from_pipeline_urls(pipelines_urls): - xfails = defaultdict(dict) - - for url in pipelines_urls: - new_xfails = get_xfails_from_pipeline_url(url) - for key in new_xfails: - xfails[key].update(new_xfails[key]) - - return xfails - - -def print_diff(old_content, new_content, file_name): - diff = difflib.unified_diff(old_content, new_content, lineterm="", fromfile=file_name, tofile=file_name) - diff = [colored(line, "green") if line.startswith("+") else - colored(line, "red") if line.startswith("-") else line for line in diff] - print("\n".join(diff[:3])) - print("".join(diff[3:])) - - -def main(pipelines_urls, only_flakes): - xfails = get_xfails_from_pipeline_urls(pipelines_urls) - - for job_name in xfails.keys(): - fails_txt_path = get_xfails_file_path(job_name, "fails") - flakes_txt_path = get_xfails_file_path(job_name, "flakes") - - fails_txt = read_file(fails_txt_path) - flakes_txt = read_file(flakes_txt_path) - - fails_txt_original = fails_txt.copy() - flakes_txt_original = flakes_txt.copy() - - for job_id in xfails[job_name].keys(): - for unit_test in xfails[job_name][job_id]: - unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test) - - if not unit_test_name: - continue - - if only_flakes: - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # drop it from flakes if it is present to analyze it again - remove_unit_test_if_present(flakes_txt, unit_test_name) - - if unit_test_result == "UnexpectedPass": - remove_unit_test_if_present(fails_txt, unit_test_name) - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # flake result - if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]): - remove_unit_test_if_present(fails_txt, unit_test_name) - add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path) - continue - - # consistent result - add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, - fails_txt_path) - - fails_txt.sort() - flakes_txt.sort() - - if fails_txt != fails_txt_original: - save_file(fails_txt, fails_txt_path) - print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path)) - if flakes_txt != flakes_txt_original: - save_file(flakes_txt, flakes_txt_path) - print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.") - parser.add_argument("pipeline_urls", nargs="+", type=str, help="URLs to the pipelines to analyze the failures.") - parser.add_argument("--only-flakes", action="store_true", help="Treat every detected failure as a flake, edit *-flakes.txt only.") - - args = parser.parse_args() - - main(args.pipeline_urls, args.only_flakes) - print("Done.") diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt index 5408110f4c60..71c02104a683 
100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt @@ -1,24 +1,3 @@ -core_hotunplug@hotrebind,Fail -core_hotunplug@hotrebind-lateclose,Fail -core_hotunplug@hotreplug,Fail -core_hotunplug@hotreplug-lateclose,Fail -core_hotunplug@hotunbind-rebind,Fail -core_hotunplug@hotunplug-rescan,Fail -core_hotunplug@unbind-rebind,Fail -core_hotunplug@unplug-rescan,Fail -device_reset@cold-reset-bound,Fail -device_reset@reset-bound,Fail -device_reset@unbind-cold-reset-rebind,Fail -device_reset@unbind-reset-rebind,Fail -dumb_buffer@invalid-bpp,Fail -kms_content_protection@atomic,Crash -kms_content_protection@atomic-dpms,Crash -kms_content_protection@content-type-change,Crash -kms_content_protection@lic-type-0,Crash -kms_content_protection@lic-type-1,Crash -kms_content_protection@srm,Crash -kms_content_protection@type1,Crash -kms_content_protection@uevent,Crash kms_cursor_crc@cursor-rapid-movement-128x128,Fail kms_cursor_crc@cursor-rapid-movement-128x42,Fail kms_cursor_crc@cursor-rapid-movement-256x256,Fail diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt index 5ccc771fbb36..b3d16e82e9a2 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt @@ -205,6 +205,59 @@ kms_cursor_edge_walk@128x128-right-edge # R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810 # R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000 +kms_cursor_edge_walk@128x128-left-edge +# DEBUG - Begin test kms_cursor_edge_walk@128x128-left-edge +# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI +# CPU: 0 UID: 0 PID: 27 Comm: kworker/u8:1 Not tainted 6.11.0-rc5-g5d3429a7e9aa #1 +# Hardware name: ChromiumOS crosvm, BIOS 0 +# Workqueue: vkms_composer vkms_composer_worker [vkms] +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000 +# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000 +# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff +# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60 +# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0 +# Call Trace: +# <TASK> +# ? __die+0x1e/0x60 +# ? page_fault_oops+0x17b/0x4a0 +# ? exc_page_fault+0x6d/0x230 +# ? asm_exc_page_fault+0x26/0x30 +# ? compose_active_planes+0x344/0x4e0 [vkms] +# ? compose_active_planes+0x32f/0x4e0 [vkms] +# ? srso_return_thunk+0x5/0x5f +# vkms_composer_worker+0x205/0x240 [vkms] +# process_one_work+0x201/0x6c0 +# ? lock_is_held_type+0x9e/0x110 +# worker_thread+0x17e/0x310 +# ? __pfx_worker_thread+0x10/0x10 +# kthread+0xce/0x100 +# ? __pfx_kthread+0x10/0x10 +# ret_from_fork+0x2f/0x50 +# ? 
__pfx_kthread+0x10/0x10 +# ret_from_fork_asm+0x1a/0x30 +# </TASK> +# Modules linked in: vkms +# CR2: 0000000000000018 +# ---[ end trace 0000000000000000 ]--- +# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms] +# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48 +# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282 +# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000 +# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000 +# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff +# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60 +# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 +# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000 +# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0 +# vkms_vblank_simulate: vblank timer overrun + # Skip driver specific tests ^amdgpu.* ^msm.* diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 3b824e01c9b5..6a4e892afcf8 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -3,7 +3,7 @@ config DRM_DISPLAY_DP_AUX_BUS tristate depends on DRM - depends on OF || COMPILE_TEST + depends on OF config DRM_DISPLAY_HELPER tristate @@ -64,6 +64,12 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG If in doubt, say "N". +config DRM_DISPLAY_DSC_HELPER + bool + depends on DRM_DISPLAY_HELPER + help + DRM display helpers for VESA DSC (used by DSI and DisplayPort). + config DRM_DISPLAY_HDCP_HELPER bool help diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index fbb9d2b8acd4..629c834c3192 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -8,10 +8,11 @@ drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ drm_dp_dual_mode_helper.o \ drm_dp_helper.o \ - drm_dp_mst_topology.o \ - drm_dsc_helper.o + drm_dp_mst_topology.o drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \ drm_dp_tunnel.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \ + drm_dsc_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 3da5b8bf8259..320c297008aa 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -397,11 +397,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, bridge_connector->encoder = encoder; /* - * TODO: Handle doublescan_allowed, stereo_allowed and - * ycbcr_420_allowed. + * TODO: Handle doublescan_allowed and stereo_allowed. */ connector = &bridge_connector->base; connector->interlace_allowed = true; + connector->ycbcr_420_allowed = true; /* * Initialise connector status handling. 
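The drm_bridge_connector hunk above now advertises YCbCr 4:2:0 by default and, in the chain walk that follows, clears the flag as soon as one bridge in the encoder chain does not support it. A bridge driver therefore opts in by setting the flag on its struct drm_bridge before the bridge is attached; a minimal sketch, with my_bridge and its registration function as hypothetical names:

.. code-block:: c

	#include <drm/drm_bridge.h>

	struct my_bridge {
		struct drm_bridge bridge;	/* hypothetical driver state */
	};

	static void my_bridge_register(struct my_bridge *mybr)
	{
		/*
		 * Opt in before attach: drm_bridge_connector_init()
		 * ANDs this flag across every bridge in the chain, so
		 * a single holdout disables YCbCr 4:2:0 for the whole
		 * connector.
		 */
		mybr->bridge.ycbcr_420_allowed = true;
		drm_bridge_add(&mybr->bridge);
	}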
First locate the furthest @@ -414,6 +414,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, drm_for_each_bridge_in_chain(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; + if (!bridge->ycbcr_420_allowed) + connector->ycbcr_420_allowed = false; if (bridge->ops & DRM_BRIDGE_OP_EDID) bridge_connector->bridge_edid = bridge; diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index 14a2a8473682..c491e3203bf1 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write); static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) { - static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] = "DP-HDMI ADAPTOR\x04"; return memcmp(hdmi_id, dp_dual_mode_hdmi_id, - sizeof(dp_dual_mode_hdmi_id)) == 0; + DP_DUAL_MODE_HDMI_ID_LEN) == 0; } static bool is_type1_adaptor(uint8_t adaptor_id) diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c deleted file mode 100644 index 5729f3bb4398..000000000000 --- a/drivers/gpu/drm/drm_aperture.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: MIT - -#include <linux/aperture.h> -#include <linux/platform_device.h> - -#include <drm/drm_aperture.h> -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -/** - * DOC: overview - * - * A graphics device might be supported by different drivers, but only one - * driver can be active at any given time. Many systems load a generic - * graphics drivers, such as EFI-GOP or VESA, early during the boot process. - * During later boot stages, they replace the generic driver with a dedicated, - * hardware-specific driver. To take over the device the dedicated driver - * first has to remove the generic driver. DRM aperture functions manage - * ownership of DRM framebuffer memory and hand-over between drivers. - * - * DRM drivers should call drm_aperture_remove_conflicting_framebuffers() - * at the top of their probe function. The function removes any generic - * driver that is currently associated with the given framebuffer memory. - * If the framebuffer is located at PCI BAR 0, the rsp code looks as in the - * example given below. - * - * .. code-block:: c - * - * static const struct drm_driver example_driver = { - * ... - * }; - * - * static int remove_conflicting_framebuffers(struct pci_dev *pdev) - * { - * resource_size_t base, size; - * int ret; - * - * base = pci_resource_start(pdev, 0); - * size = pci_resource_len(pdev, 0); - * - * return drm_aperture_remove_conflicting_framebuffers(base, size, - * &example_driver); - * } - * - * static int probe(struct pci_dev *pdev) - * { - * int ret; - * - * // Remove any generic drivers... - * ret = remove_conflicting_framebuffers(pdev); - * if (ret) - * return ret; - * - * // ... and initialize the hardware. - * ... - * - * drm_dev_register(); - * - * return 0; - * } - * - * PCI device drivers should call - * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the - * framebuffer apertures automatically. Device drivers without knowledge of - * the framebuffer's location shall call drm_aperture_remove_framebuffers(), - * which removes all drivers for known framebuffer. 
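The is_hdmi_adaptor() fix above is a string-literal sizing issue: "DP-HDMI ADAPTOR\x04" is exactly DP_DUAL_MODE_HDMI_ID_LEN (16) characters, so a 16-byte array left no room for the literal's implicit NUL terminator, which newer compilers warn about. Sizing the array one byte larger keeps the initializer well-formed while memcmp() still compares only the 16 ID bytes. A standalone illustration of the pattern (generic names, not the kernel code):

.. code-block:: c

	#include <string.h>

	#define ID_LEN 16	/* stands in for DP_DUAL_MODE_HDMI_ID_LEN */

	/*
	 * ID_LEN + 1 leaves room for the implicit NUL of the
	 * 16-character literal; an exact-fit array would silently
	 * drop the terminator.
	 */
	static const char hdmi_id[ID_LEN + 1] = "DP-HDMI ADAPTOR\x04";

	static int is_hdmi(const char id[ID_LEN])
	{
		/* Compare only the ID bytes, never the NUL. */
		return memcmp(id, hdmi_id, ID_LEN) == 0;
	}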
- * - * Drivers that are susceptible to being removed by other drivers, such as - * generic EFI or VESA drivers, have to register themselves as owners of their - * given framebuffer memory. Ownership of the framebuffer memory is achieved - * by calling devm_aperture_acquire_from_firmware(). On success, the driver - * is the owner of the framebuffer range. The function fails if the - * framebuffer is already owned by another driver. See below for an example. - * - * .. code-block:: c - * - * static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev) - * { - * resource_size_t base, size; - * - * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - * if (!mem) - * return -EINVAL; - * base = mem->start; - * size = resource_size(mem); - * - * return devm_acquire_aperture_from_firmware(dev, base, size); - * } - * - * static int probe(struct platform_device *pdev) - * { - * struct drm_device *dev; - * int ret; - * - * // ... Initialize the device... - * dev = devm_drm_dev_alloc(); - * ... - * - * // ... and acquire ownership of the framebuffer. - * ret = acquire_framebuffers(dev, pdev); - * if (ret) - * return ret; - * - * drm_dev_register(dev, 0); - * - * return 0; - * } - * - * The generic driver is now subject to forced removal by other drivers. This - * only works for platform drivers that support hot unplug. - * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al. - * for the registered framebuffer range, the aperture helpers call - * platform_device_unregister() and the generic driver unloads itself. It - * may not access the device's registers, framebuffer memory, ROM, etc - * afterwards. - */ - -/** - * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer - * on behalf of a DRM driver. - * @dev: the DRM device to own the framebuffer memory - * @base: the framebuffer's byte offset in physical memory - * @size: the framebuffer size in bytes - * - * Installs the given device as the new owner of the framebuffer. The function - * expects the framebuffer to be provided by a platform device that has been - * set up by firmware. Firmware can be any generic interface, such as EFI, - * VESA, VGA, etc. If the native hardware driver takes over ownership of the - * framebuffer range, the firmware state gets lost. Aperture helpers will then - * unregister the platform device automatically. Acquired apertures are - * released automatically if the underlying device goes away. - * - * The function fails if the framebuffer range, or parts of it, is currently - * owned by another driver. To evict current owners, callers should use - * drm_aperture_remove_conflicting_framebuffers() et al. before calling this - * function. The function also fails if the given device is not a platform - * device. - * - * Returns: - * 0 on success, or a negative errno value otherwise. 
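With drm_aperture.c deleted, the wrapper bodies (visible further below) show that these functions only forwarded to the generic helpers in <linux/aperture.h>, so callers can use those directly. A sketch of the PCI case, with my_pci_probe and the "my-drm" name as placeholders:

.. code-block:: c

	#include <linux/aperture.h>
	#include <linux/pci.h>

	static int my_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
	{
		int ret;

		/*
		 * Evict generic firmware framebuffer drivers over all of
		 * this device's BARs; "my-drm" stands in for the
		 * drm_driver.name value the removed wrapper passed along.
		 */
		ret = aperture_remove_conflicting_pci_devices(pdev, "my-drm");
		if (ret)
			return ret;

		/* ... hardware init and drm_dev_register() follow ... */
		return 0;
	}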
- */ -int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, - resource_size_t size) -{ - struct platform_device *pdev; - - if (drm_WARN_ON(dev, !dev_is_platform(dev->dev))) - return -EINVAL; - - pdev = to_platform_device(dev->dev); - - return devm_aperture_acquire_for_platform_device(pdev, base, size); -} -EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); - -/** - * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range - * @base: the aperture's base address in physical memory - * @size: aperture size in bytes - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers which use the memory range described by - * @base and @size. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_devices(base, size, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); - -/** - * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices - * @pdev: PCI device - * @req_driver: requesting DRM driver - * - * This function removes graphics device drivers using the memory range configured - * for any of @pdev's memory bars. The function assumes that a PCI device with - * shadowed ROM drives a primary display and so kicks out vga16fb. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const struct drm_driver *req_driver) -{ - return aperture_remove_conflicting_pci_devices(pdev, req_driver->name); -} -EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 0fc99da93afe..9ea2611770f4 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1132,6 +1132,8 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed); + drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed); drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace)); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 43cdf39019a4..5186d2114a50 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3015,7 +3015,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, bool stall) { int i, ret; - unsigned long flags; + unsigned long flags = 0; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; struct drm_crtc *crtc; diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index bfedcbf516db..549b28a5918c 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -10,7 +10,6 @@ #include <linux/slab.h> #include <drm/drm_client.h> -#include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> @@ -172,99 +171,6 @@ void drm_client_release(struct drm_client_dev *client) } EXPORT_SYMBOL(drm_client_release); -/** - * drm_client_dev_unregister - Unregister clients - * @dev: DRM device - * - * This function releases all clients by calling each client's - * &drm_client_funcs.unregister callback. The callback function - * is responsibe for releaseing all resources including the client - * itself. - * - * The helper drm_dev_unregister() calls this function. Drivers - * that use it don't need to call this function themselves. - */ -void drm_client_dev_unregister(struct drm_device *dev) -{ - struct drm_client_dev *client, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { - list_del(&client->list); - if (client->funcs && client->funcs->unregister) { - client->funcs->unregister(client); - } else { - drm_client_release(client); - kfree(client); - } - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_unregister); - -/** - * drm_client_dev_hotplug - Send hotplug event to clients - * @dev: DRM device - * - * This function calls the &drm_client_funcs.hotplug callback on the attached clients. - * - * drm_kms_helper_hotplug_event() calls this function, so drivers that use it - * don't need to call this function themselves. 
- */ -void drm_client_dev_hotplug(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - if (!dev->mode_config.num_connector) { - drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); - return; - } - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->hotplug) - continue; - - if (client->hotplug_failed) - continue; - - ret = client->funcs->hotplug(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (ret) - client->hotplug_failed = true; - } - mutex_unlock(&dev->clientlist_mutex); -} -EXPORT_SYMBOL(drm_client_dev_hotplug); - -void drm_client_dev_restore(struct drm_device *dev) -{ - struct drm_client_dev *client; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) { - if (!client->funcs || !client->funcs->restore) - continue; - - ret = client->funcs->restore(client); - drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); - if (!ret) /* The first one to return zero gets the privilege to restore */ - break; - } - mutex_unlock(&dev->clientlist_mutex); -} - static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { @@ -584,30 +490,3 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re 0, 0, NULL, 0); } EXPORT_SYMBOL(drm_client_framebuffer_flush); - -#ifdef CONFIG_DEBUG_FS -static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_client_dev *client; - - mutex_lock(&dev->clientlist_mutex); - list_for_each_entry(client, &dev->clientlist, list) - drm_printf(&p, "%s\n", client->name); - mutex_unlock(&dev->clientlist_mutex); - - return 0; -} - -static const struct drm_debugfs_info drm_client_debugfs_list[] = { - { "internal_clients", drm_client_debugfs_internal_clients, 0 }, -}; - -void drm_client_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_client_debugfs_list, - ARRAY_SIZE(drm_client_debugfs_list)); -} -#endif diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c new file mode 100644 index 000000000000..e303de564485 --- /dev/null +++ b/drivers/gpu/drm/drm_client_event.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> + +#include <drm/drm_client.h> +#include <drm/drm_client_event.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +/** + * drm_client_dev_unregister - Unregister clients + * @dev: DRM device + * + * This function releases all clients by calling each client's + * &drm_client_funcs.unregister callback. The callback function + * is responsible for releasing all resources including the client + * itself. + * + * The helper drm_dev_unregister() calls this function. Drivers + * that use it don't need to call this function themselves.
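Beyond the moved unregister/hotplug/restore paths, drm_client_event.c grows suspend and resume entry points (their bodies follow below). Drivers that manage power themselves are expected to call them from their PM hooks; a hedged sketch, assuming the drm_device is stored as driver data:

.. code-block:: c

	#include <linux/device.h>

	#include <drm/drm_client_event.h>
	#include <drm/drm_drv.h>

	static int my_pm_suspend(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* false: this path does not hold the console lock */
		drm_client_dev_suspend(drm, false);
		/* ... power down the display hardware ... */
		return 0;
	}

	static int my_pm_resume(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* ... power the display hardware back up ... */
		drm_client_dev_resume(drm, false);
		return 0;
	}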
+ */ +void drm_client_dev_unregister(struct drm_device *dev) +{ + struct drm_client_dev *client, *tmp; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { + list_del(&client->list); + if (client->funcs && client->funcs->unregister) { + client->funcs->unregister(client); + } else { + drm_client_release(client); + kfree(client); + } + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_unregister); + +/** + * drm_client_dev_hotplug - Send hotplug event to clients + * @dev: DRM device + * + * This function calls the &drm_client_funcs.hotplug callback on the attached clients. + * + * drm_kms_helper_hotplug_event() calls this function, so drivers that use it + * don't need to call this function themselves. + */ +void drm_client_dev_hotplug(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + if (!dev->mode_config.num_connector) { + drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n"); + return; + } + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->hotplug) + continue; + + if (client->hotplug_failed) + continue; + + ret = client->funcs->hotplug(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (ret) + client->hotplug_failed = true; + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_hotplug); + +void drm_client_dev_restore(struct drm_device *dev) +{ + struct drm_client_dev *client; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->funcs || !client->funcs->restore) + continue; + + ret = client->funcs->restore(client); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + if (!ret) /* The first one to return zero gets the privilege to restore */ + break; + } + mutex_unlock(&dev->clientlist_mutex); +} + +static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, client->suspended)) + return 0; + + if (client->funcs && client->funcs->suspend) + ret = client->funcs->suspend(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = true; + + return ret; +} + +void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) { + if (!client->suspended) + drm_client_suspend(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_suspend); + +static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_device *dev = client->dev; + int ret = 0; + + if (drm_WARN_ON_ONCE(dev, !client->suspended)) + return 0; + + if (client->funcs && client->funcs->resume) + ret = client->funcs->resume(client, holds_console_lock); + drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); + + client->suspended = false; + + return ret; +} + +void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +{ + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, 
&dev->clientlist, list) { + if (client->suspended) + drm_client_resume(client, holds_console_lock); + } + mutex_unlock(&dev->clientlist_mutex); +} +EXPORT_SYMBOL(drm_client_dev_resume); + +#ifdef CONFIG_DEBUG_FS +static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_client_dev *client; + + mutex_lock(&dev->clientlist_mutex); + list_for_each_entry(client, &dev->clientlist, list) + drm_printf(&p, "%s\n", client->name); + mutex_unlock(&dev->clientlist_mutex); + + return 0; +} + +static const struct drm_debugfs_info drm_client_debugfs_list[] = { + { "internal_clients", drm_client_debugfs_internal_clients, 0 }, +}; + +void drm_client_debugfs_init(struct drm_device *dev) +{ + drm_debugfs_add_files(dev, drm_client_debugfs_list, + ARRAY_SIZE(drm_client_debugfs_list)); +} +#endif diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index cee5eafbfb81..251f94313717 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -145,7 +145,7 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) } static struct drm_display_mode * -drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height) +drm_connector_preferred_mode(struct drm_connector *connector, int width, int height) { struct drm_display_mode *mode; @@ -159,6 +159,12 @@ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int return NULL; } +static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector) +{ + return list_first_entry_or_null(&connector->modes, + struct drm_display_mode, head); +} + static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector) { struct drm_cmdline_mode *cmdline_mode; @@ -331,7 +337,7 @@ static bool drm_client_target_cloned(struct drm_device *dev, if (!modes[i]) can_clone = false; } - kfree(dmt_mode); + drm_mode_destroy(dev, dmt_mode); if (can_clone) { drm_dbg_kms(dev, "can clone using 1024x768\n"); @@ -441,13 +447,11 @@ retry: drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n", connector->base.id, connector->name, connector->tile_group ? connector->tile_group->id : 0); - modes[i] = drm_connector_has_preferred_mode(connector, width, height); + modes[i] = drm_connector_preferred_mode(connector, width, height); } /* No preferred modes, pick one off the list */ - if (!modes[i] && !list_empty(&connector->modes)) { - list_for_each_entry(modes[i], &connector->modes, head) - break; - } + if (!modes[i]) + modes[i] = drm_connector_first_mode(connector); /* * In case of tiled mode if all tiles not present fallback to * first available non tiled mode. @@ -531,7 +535,7 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, my_score++; if (connector->cmdline_mode.specified) my_score++; - if (drm_connector_has_preferred_mode(connector, width, height)) + if (drm_connector_preferred_mode(connector, width, height)) my_score++; /* @@ -686,16 +690,14 @@ retry: "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n", connector->base.id, connector->name, str_yes_no(connector->has_tile)); - modes[i] = drm_connector_has_preferred_mode(connector, width, height); + modes[i] = drm_connector_preferred_mode(connector, width, height); } /* No preferred mode marked by the EDID? Are there any modes? 
*/ if (!modes[i] && !list_empty(&connector->modes)) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n", connector->base.id, connector->name); - modes[i] = list_first_entry(&connector->modes, - struct drm_display_mode, - head); + modes[i] = drm_connector_first_mode(connector); } /* last resort: use current mode */ @@ -878,7 +880,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, break; } - kfree(modeset->mode); + drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, mode); if (!modeset->mode) { ret = -ENOMEM; diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/drm_client_setup.c new file mode 100644 index 000000000000..c14221ca5a0d --- /dev/null +++ b/drivers/gpu/drm/drm_client_setup.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +#include <drm/drm_client_setup.h> +#include <drm/drm_device.h> +#include <drm/drm_fbdev_client.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> + +/** + * drm_client_setup() - Setup in-kernel DRM clients + * @dev: DRM device + * @format: Preferred pixel format for the device. Use NULL, unless + * there is clearly a driver-preferred format. + * + * This function sets up the in-kernel DRM clients. Restore, hotplug + * events and teardown are all taken care of. + * + * Drivers should call drm_client_setup() after registering the new + * DRM device with drm_dev_register(). This function is safe to call + * even when there are no connectors present. Setup will be retried + * on the next hotplug event. + * + * The clients are destroyed by drm_dev_unregister(). + */ +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + int ret; + + ret = drm_fbdev_client_setup(dev, format); + if (ret) + drm_warn(dev, "Failed to set up DRM client; error %d\n", ret); +} +EXPORT_SYMBOL(drm_client_setup); + +/** + * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @fourcc: Preferred pixel format as 4CC code for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a 4CC code as second argument. + */ +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ + drm_client_setup(dev, drm_format_info(fourcc)); +} +EXPORT_SYMBOL(drm_client_setup_with_fourcc); + +/** + * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @color_mode: Preferred color mode for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a color mode as second argument. + * + * Do not use this function in new drivers. Prefer drm_client_setup() with a + * format of NULL. 
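Putting the kernel-doc above into practice: a driver registers the device first and then hands over to the client library. A minimal probe tail, assuming a driver with no strong format preference (names hypothetical):

.. code-block:: c

	#include <drm/drm_client_setup.h>
	#include <drm/drm_drv.h>

	static int my_driver_register(struct drm_device *dev)
	{
		int ret;

		ret = drm_dev_register(dev, 0);
		if (ret)
			return ret;

		/*
		 * NULL lets the clients pick a format. Failures are only
		 * logged; fbdev emulation is not fatal to the driver.
		 */
		drm_client_setup(dev, NULL);

		return 0;
	}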
+ */ +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode) +{ + u32 fourcc = drm_driver_color_mode_format(dev, color_mode); + + drm_client_setup_with_fourcc(dev, fourcc); +} +EXPORT_SYMBOL(drm_client_setup_with_color_mode); + +MODULE_DESCRIPTION("In-kernel DRM clients"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9d3e6dd68810..536409a35df4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -32,7 +32,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_auth.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -78,12 +77,14 @@ static int drm_clients_info(struct seq_file *m, void *data) kuid_t uid; seq_printf(m, - "%20s %5s %3s master a %5s %10s\n", + "%20s %5s %3s master a %5s %10s %*s\n", "command", "tgid", "dev", "uid", - "magic"); + "magic", + DRM_CLIENT_NAME_MAX_LEN, + "name"); /* dev->filelist is sorted youngest first, but we want to present * oldest first (i.e. kernel, servers, clients), so walk backwardss. @@ -94,19 +95,23 @@ static int drm_clients_info(struct seq_file *m, void *data) struct task_struct *task; struct pid *pid; + mutex_lock(&priv->client_name_lock); rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */ pid = rcu_dereference(priv->pid); task = pid_task(pid, PIDTYPE_TGID); uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID; - seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n", + seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n", task ? task->comm : "<unknown>", pid_vnr(pid), priv->minor->index, is_current_master ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), uid), - priv->magic); + priv->magic, + DRM_CLIENT_NAME_MAX_LEN, + priv->client_name ? priv->client_name : "<unset>"); rcu_read_unlock(); + mutex_unlock(&priv->client_name_lock); } mutex_unlock(&dev->filelist_mutex); return 0; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ac30b0ec9d93..c2c172eb25df 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -38,7 +38,7 @@ #include <drm/drm_accel.h> #include <drm/drm_cache.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 29c53f9f449c..c9008113111b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init); * @fb_helper: driver-allocated fbdev helper * * A helper to alloc fb_info and the member cmap. Called by the driver - * within the fb_probe fb_helper callback function. Drivers do not - * need to release the allocated fb_info structure themselves, this is + * within the struct &drm_driver.fbdev_probe callback function. Drivers do + * not need to release the allocated fb_info structure themselves, this is * automatically done when calling drm_fb_helper_fini(). * * RETURNS: @@ -554,7 +554,7 @@ EXPORT_SYMBOL(drm_fb_helper_release_info); /** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device - * @fb_helper: driver-allocated fbdev helper, can be NULL + * @fb_helper: driver-allocated fbdev helper, must not be NULL * * A wrapper around unregister_framebuffer, to release the fb_info * framebuffer device. 
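Back in the drm_debugfs.c hunk above, the new name column keeps the header and the rows aligned with seq_printf()'s dynamic field width: %*s consumes an extra int argument as the width, here DRM_CLIENT_NAME_MAX_LEN for both lines. The same idiom in plain C:

.. code-block:: c

	#include <stdio.h>

	#define NAME_MAX_LEN 24	/* stands in for DRM_CLIENT_NAME_MAX_LEN */

	int main(void)
	{
		/*
		 * "%*s" reads the width from the argument list, so the
		 * header and every data row share one column width.
		 */
		printf("%10s %*s\n", "magic", NAME_MAX_LEN, "name");
		printf("%10u %*s\n", 42u, NAME_MAX_LEN, "<unset>");

		return 0;
	}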
This must be called before releasing all resources for @@ -562,8 +562,12 @@ EXPORT_SYMBOL(drm_fb_helper_release_info); */ void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) { - if (fb_helper && fb_helper->info) - unregister_framebuffer(fb_helper->info); + struct fb_info *info = fb_helper->info; + struct device *dev = info->device; + + if (dev_is_pci(dev)) + vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL); + unregister_framebuffer(fb_helper->info); } EXPORT_SYMBOL(drm_fb_helper_unregister_info); @@ -693,6 +697,7 @@ void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u3 } EXPORT_SYMBOL(drm_fb_helper_damage_area); +#ifdef CONFIG_FB_DEFERRED_IO /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer @@ -736,6 +741,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli } } EXPORT_SYMBOL(drm_fb_helper_deferred_io); +#endif /** * drm_fb_helper_set_suspend - wrapper around fb_set_suspend @@ -1441,67 +1447,27 @@ unlock: EXPORT_SYMBOL(drm_fb_helper_pan_display); static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats, - size_t format_count, uint32_t bpp, uint32_t depth) + size_t format_count, unsigned int color_mode) { struct drm_device *dev = fb_helper->dev; uint32_t format; size_t i; - /* - * Do not consider YUV or other complicated formats - * for framebuffers. This means only legacy formats - * are supported (fmt->depth is a legacy field), but - * the framebuffer emulation can only deal with such - * formats, specifically RGB/BGA formats. - */ - format = drm_mode_legacy_fb_format(bpp, depth); - if (!format) - goto err; + format = drm_driver_color_mode_format(dev, color_mode); + if (!format) { + drm_info(dev, "unsupported color mode of %d\n", color_mode); + return DRM_FORMAT_INVALID; + } for (i = 0; i < format_count; ++i) { if (formats[i] == format) return format; } - -err: - /* We found nothing. 
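The refactor that follows replaces drm_fb_helper_find_color_mode_format() (removed just below) with a single call to drm_driver_color_mode_format(). For reference, this is the mapping the removed helper implemented, and which the new helper can be assumed to reproduce in terms of 4CC formats:

.. code-block:: c

	#include <linux/errno.h>

	/*
	 * Legacy color-mode -> (bpp, depth) table from the removed
	 * helper; only 15 and 32 differ from the identity mapping.
	 */
	static int color_mode_to_bpp_depth(unsigned int color_mode,
					   unsigned int *bpp, unsigned int *depth)
	{
		switch (color_mode) {
		case 1: case 2: case 4: case 8: case 16: case 24:
			*bpp = *depth = color_mode;
			return 0;
		case 15:
			*bpp = 16;	/* XRGB1555 */
			*depth = 15;
			return 0;
		case 32:
			*bpp = 32;	/* XRGB8888 */
			*depth = 24;
			return 0;
		default:
			return -EINVAL;
		}
	}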
*/ - drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth); + drm_warn(dev, "format %p4cc not supported\n", &format); return DRM_FORMAT_INVALID; } -static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper, - const uint32_t *formats, size_t format_count, - unsigned int color_mode) -{ - struct drm_device *dev = fb_helper->dev; - uint32_t bpp, depth; - - switch (color_mode) { - case 1: - case 2: - case 4: - case 8: - case 16: - case 24: - bpp = depth = color_mode; - break; - case 15: - bpp = 16; - depth = 15; - break; - case 32: - bpp = 32; - depth = 24; - break; - default: - drm_info(dev, "unsupported color mode of %d\n", color_mode); - return DRM_FORMAT_INVALID; - } - - return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth); -} - static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { @@ -1531,10 +1497,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, if (!cmdline_mode->bpp_specified) continue; - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - cmdline_mode->bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + cmdline_mode->bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1544,10 +1510,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, break; /* found supported format */ /* try preferred color mode */ - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - fb_helper->preferred_bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + fb_helper->preferred_bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1648,13 +1614,14 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, /* * Allocates the backing storage and sets up the fbdev info structure through - * the ->fb_probe callback. + * the ->fbdev_probe callback. */ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_surface_size sizes; + struct fb_info *info; int ret; ret = drm_fb_helper_find_sizes(fb_helper, &sizes); @@ -1666,15 +1633,20 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) } /* push down into drivers */ - ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); + if (dev->driver->fbdev_probe) + ret = dev->driver->fbdev_probe(fb_helper, &sizes); + else if (fb_helper->funcs) + ret = fb_helper->funcs->fb_probe(fb_helper, &sizes); if (ret < 0) return ret; strcpy(fb_helper->fb->comm, "[fbcon]"); + info = fb_helper->info; + /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */ - if (dev_is_pci(dev->dev)) - vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info); + if (dev_is_pci(info->device)) + vga_switcheroo_client_fb_set(to_pci_dev(info->device), info); return 0; } @@ -1738,7 +1710,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info, * instance and the drm framebuffer allocated in &drm_fb_helper.fb. 
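drm_fb_helper_single_fb_probe() above now prefers the per-driver &drm_driver.fbdev_probe hook and only falls back to the legacy &drm_fb_helper_funcs.fb_probe. Wiring it up is a single initializer in the driver structure; a sketch for a shmem-based driver, using the probe exported later in this patch (remaining driver fields elided):

.. code-block:: c

	#include <drm/drm_drv.h>
	#include <drm/drm_fbdev_shmem.h>

	static const struct drm_driver my_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* New-style hook; replaces drm_fb_helper_funcs.fb_probe. */
		.fbdev_probe		= drm_fbdev_shmem_driver_fbdev_probe,
		/* ... fops, name, dumb_create, etc. elided ... */
	};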
* * Drivers should call this (or their equivalent setup code) from their - * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev + * &drm_driver.fbdev_probe callback after having allocated the fbdev * backing storage framebuffer. */ void drm_fb_helper_fill_info(struct fb_info *info, @@ -1894,7 +1866,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) * Note that this also registers the fbdev and so allows userspace to call into * the driver through the fbdev interfaces. * - * This function will call down into the &drm_fb_helper_funcs.fb_probe callback + * This function will call down into the &drm_driver.fbdev_probe callback * to let the driver allocate and initialize the fbdev info structure and the * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided * as a helper to setup simple default values for the fbdev info structure. diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/drm_fbdev_client.c new file mode 100644 index 000000000000..246fb63ab250 --- /dev/null +++ b/drivers/gpu/drm/drm_fbdev_client.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: MIT + +#include <drm/drm_client.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fbdev_client.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> + +/* + * struct drm_client_funcs + */ + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->info) { + drm_fb_helper_unregister_info(fb_helper); + } else { + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + } +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err_drm_err; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper); + if (ret) + goto err_drm_fb_helper_fini; + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_drm_err: + drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret); + return ret; +} + +static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, true); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, true); + + return 0; +} + +static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (holds_console_lock) + drm_fb_helper_set_suspend(fb_helper, false); + else + drm_fb_helper_set_suspend_unlocked(fb_helper, false); + + return 0; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, + .suspend = drm_fbdev_client_suspend, + .resume = drm_fbdev_client_resume, +}; + +/** + * 
drm_fbdev_client_setup() - Setup fbdev emulation + * @dev: DRM device + * @format: Preferred color format for the device. DRM_FORMAT_XRGB8888 + * is used if this is zero. + * + * This function sets up fbdev emulation. Restore, hotplug events and + * teardown are all taken care of. Drivers that do suspend/resume need + * to call drm_client_dev_suspend() and drm_client_dev_resume() by + * themselves. Simple drivers might use drm_mode_config_helper_suspend(). + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * The fbdev client is destroyed by drm_dev_unregister(). + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + struct drm_fb_helper *fb_helper; + unsigned int color_mode; + int ret; + + /* TODO: Use format info throughout DRM */ + if (format) { + unsigned int bpp = drm_format_info_bpp(format, 0); + + switch (bpp) { + case 16: + color_mode = format->depth; // could also be 15 + break; + default: + color_mode = bpp; + } + } else { + switch (dev->mode_config.preferred_depth) { + case 0: + case 24: + color_mode = 32; + break; + default: + color_mode = dev->mode_config.preferred_depth; + } + } + + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return -ENOMEM; + drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL); + + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + drm_err(dev, "Failed to register client: %d\n", ret); + goto err_drm_client_init; + } + + drm_client_register(&fb_helper->client); + + return 0; + +err_drm_client_init: + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + return ret; +} +EXPORT_SYMBOL(drm_fbdev_client_setup); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 51c2d742d199..b14b581c059d 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -2,15 +2,13 @@ #include <linux/fb.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> -#include <drm/drm_fbdev_dma.h> - /* * struct fb_ops */ @@ -103,8 +101,35 @@ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = { * struct drm_fb_helper */ -static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { + .fb_dirty = drm_fbdev_dma_helper_fb_dirty, +}; + +/* + * struct drm_fb_helper + */ + +int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct 
drm_device *dev = fb_helper->dev; @@ -148,6 +173,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_client_buffer_delete; } + fb_helper->funcs = &drm_fbdev_dma_helper_funcs; fb_helper->buffer = buffer; fb_helper->fb = fb; @@ -211,136 +237,4 @@ err_drm_client_buffer_delete: drm_client_framebuffer_delete(buffer); return ret; } - -static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { - .fb_probe = drm_fbdev_dma_helper_fb_probe, - .fb_dirty = drm_fbdev_dma_helper_fb_dirty, -}; - -/* - * struct drm_client_funcs - */ - -static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_dma_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs drm_fbdev_dma_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_dma_client_unregister, - .restore = drm_fbdev_dma_client_restore, - .hotplug = drm_fbdev_dma_client_hotplug, -}; - -/** - * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * 32 is used if this is zero. - * - * This function sets up fbdev emulation for GEM DMA drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_dma_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
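For drivers that used the removed drm_fbdev_dma_setup() (its removed body follows below), the closest equivalent under the new scheme appears to be: keep .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe in the driver structure and call the client-library setup after registration. A hedged sketch, where a color mode of 32 roughly matches the old preferred_bpp default:

.. code-block:: c

	#include <drm/drm_client_setup.h>

	/* After drm_dev_register(); replaces drm_fbdev_dma_setup(dev, 32). */
	static void my_setup_fbdev(struct drm_device *dev)
	{
		/*
		 * 32 stands in for the old preferred_bpp argument; use
		 * drm_client_setup(dev, NULL) when there is no preference.
		 */
		drm_client_setup_with_color_mode(dev, 32);
	}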
- */ -void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} -EXPORT_SYMBOL(drm_fbdev_dma_setup); +EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index 0c785007f11b..f824369baacd 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -2,15 +2,13 @@ #include <linux/fb.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_shmem.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> -#include <drm/drm_fbdev_shmem.h> - /* * struct fb_ops */ @@ -105,8 +103,35 @@ static struct page *drm_fbdev_shmem_get_page(struct fb_info *info, unsigned long * struct drm_fb_helper */ -static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = { + .fb_dirty = drm_fbdev_shmem_helper_fb_dirty, +}; + +/* + * struct drm_driver + */ + +int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; @@ -139,6 +164,7 @@ static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_client_buffer_delete; } + fb_helper->funcs = &drm_fbdev_shmem_helper_funcs; fb_helper->buffer = buffer; fb_helper->fb = fb; @@ -182,136 +208,4 @@ err_drm_client_buffer_delete: drm_client_framebuffer_delete(buffer); return ret; } - -static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = { - .fb_probe = drm_fbdev_shmem_helper_fb_probe, - .fb_dirty = drm_fbdev_shmem_helper_fb_dirty, -}; - -/* - * struct drm_client_funcs - */ - -static void 
drm_fbdev_shmem_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_shmem_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int drm_fbdev_shmem_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev-shmem: Failed to setup emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs drm_fbdev_shmem_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_shmem_client_unregister, - .restore = drm_fbdev_shmem_client_restore, - .hotplug = drm_fbdev_shmem_client_hotplug, -}; - -/** - * drm_fbdev_shmem_setup() - Setup fbdev emulation for GEM SHMEM helpers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * 32 is used if this is zero. - * - * This function sets up fbdev emulation for GEM DMA drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_shmem_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
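The shmem conversion mirrors the DMA one. Where a driver wants a fixed format rather than the preferred-bpp default, the client library's format-aware entry point can be used instead; a sketch, assuming a drm_client_setup_with_fourcc() variant exists next to drm_client_setup():

#include <drm/drm_client_setup.h>
#include <drm/drm_fourcc.h>

static void bar_enable_fbdev(struct drm_device *drm)
{
	/* assumed variant: hand the fbdev client an explicit 4CC code */
	drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
}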
- */ -void drm_fbdev_shmem_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_shmem_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_shmem_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} -EXPORT_SYMBOL(drm_fbdev_shmem_setup); +EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index 119ffb28aaf9..73d35d59590c 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -65,79 +65,6 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = { .fb_destroy = drm_fbdev_ttm_fb_destroy, }; -/* - * This function uses the client API to create a framebuffer backed by a dumb buffer. - */ -static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_client_dev *client = &fb_helper->client; - struct drm_device *dev = fb_helper->dev; - struct drm_client_buffer *buffer; - struct fb_info *info; - size_t screen_size; - void *screen_buffer; - u32 format; - int ret; - - drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, - sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, - sizes->surface_height, format); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - - fb_helper->buffer = buffer; - fb_helper->fb = buffer->fb; - - screen_size = buffer->gem->size; - screen_buffer = vzalloc(screen_size); - if (!screen_buffer) { - ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; - } - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; - } - - drm_fb_helper_fill_info(info, fb_helper, sizes); - - info->fbops = &drm_fbdev_ttm_fb_ops; - - /* screen */ - info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; - info->screen_buffer = screen_buffer; - info->fix.smem_len = screen_size; - - /* deferred I/O */ - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) - goto err_drm_fb_helper_release_info; - - return 0; - -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); -err_vfree: - vfree(screen_buffer); -err_drm_client_framebuffer_delete: - fb_helper->fb = NULL; - fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); - return ret; -} - static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip, struct iosys_map *dst) @@ -236,115 +163,81 @@ static int drm_fbdev_ttm_helper_fb_dirty(struct drm_fb_helper *helper, } static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = { - .fb_probe = drm_fbdev_ttm_helper_fb_probe, .fb_dirty = drm_fbdev_ttm_helper_fb_dirty, }; -static void drm_fbdev_ttm_client_unregister(struct 
drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int drm_fbdev_ttm_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} +/* + * struct drm_driver + */ -static int drm_fbdev_ttm_client_hotplug(struct drm_client_dev *client) +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; + struct drm_client_dev *client = &fb_helper->client; + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer; + struct fb_info *info; + size_t screen_size; + void *screen_buffer; + u32 format; int ret; - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); - return 0; + fb_helper->funcs = &drm_fbdev_ttm_helper_funcs; + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret); - return ret; -} + screen_size = buffer->gem->size; + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) { + ret = -ENOMEM; + goto err_drm_client_framebuffer_delete; + } -static const struct drm_client_funcs drm_fbdev_ttm_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_ttm_client_unregister, - .restore = drm_fbdev_ttm_client_restore, - .hotplug = drm_fbdev_ttm_client_hotplug, -}; + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_vfree; + } -/** - * drm_fbdev_ttm_setup() - Setup fbdev emulation for TTM-based drivers - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * - * This function sets up fbdev emulation for TTM-based drivers that support - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_ttm_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * In order to provide fixed mmap-able memory ranges, fbdev emulation - * uses a shadow buffer in system memory. The implementation blits the shadow - * fbdev buffer onto the real buffer in regular intervals. - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
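Unlike the DMA and shmem paths, the TTM variant keeps the fbdev screen in a vzalloc'ed shadow buffer: deferred I/O batches CPU writes, and the fb_dirty callback blits the touched clips into the real buffer object. The setup moved above boils down to the following, restated with comments; the HZ / 20 delay caps shadow-to-BO flushes at roughly twenty per second:

/* shadow buffer stands in for the (possibly unmappable) BO */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;          /* vzalloc(buffer->gem->size) */

/* drm_fb_helper_deferred_io() collects dirty pages into clip
 * rectangles and invokes .fb_dirty to perform the blit */
fb_helper->fbdefio.delay = HZ / 20;           /* at most one flush per ~50 ms */
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);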
- */ -void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; + drm_fb_helper_fill_info(info, fb_helper, sizes); - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + info->fbops = &drm_fbdev_ttm_fb_ops; - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_ttm_helper_funcs); + /* screen */ + info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + info->screen_buffer = screen_buffer; + info->fix.smem_len = screen_size; - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_ttm_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } + /* deferred I/O */ + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - drm_client_register(&fb_helper->client); + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_drm_fb_helper_release_info; - return; + return 0; -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - return; +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); +err_vfree: + vfree(screen_buffer); +err_drm_client_framebuffer_delete: + fb_helper->fb = NULL; + fb_helper->buffer = NULL; + drm_client_framebuffer_delete(buffer); + return ret; } -EXPORT_SYMBOL(drm_fbdev_ttm_setup); +EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ad1dc638c83b..cb5f22f5bbb6 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -40,7 +40,7 @@ #include <linux/slab.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> @@ -129,7 +129,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) */ struct drm_file *drm_file_alloc(struct drm_minor *minor) { - static atomic64_t ident = ATOMIC_INIT(0); + static atomic64_t ident = ATOMIC64_INIT(0); struct drm_device *dev = minor->dev; struct drm_file *file; int ret; @@ -157,6 +157,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) spin_lock_init(&file->master_lookup_lock); mutex_init(&file->event_read_lock); + mutex_init(&file->client_name_lock); if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_open(dev, file); @@ -258,6 +259,10 @@ void drm_file_free(struct drm_file *file) WARN_ON(!list_empty(&file->event_list)); put_pid(rcu_access_pointer(file->pid)); + + mutex_destroy(&file->client_name_lock); + kfree(file->client_name); + kfree(file); } @@ -950,6 +955,11 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); } + mutex_lock(&file->client_name_lock); + if (file->client_name) + drm_printf(&p, "drm-client-name:\t%s\n", file->client_name); + mutex_unlock(&file->client_name_lock); + if (dev->driver->show_fdinfo) dev->driver->show_fdinfo(&p, file); } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 193cf8ed7912..3a94ca211f9c 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -36,7 +36,6 @@ * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. - * Useful in fbdev emulation code, since that deals in those values. 
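For the common legacy pairs, the helper below resolves as follows (a sketch of the expected results, per the usual XRGB layouts):

u32 fmt;

fmt = drm_mode_legacy_fb_format(16, 15); /* DRM_FORMAT_XRGB1555 */
fmt = drm_mode_legacy_fb_format(16, 16); /* DRM_FORMAT_RGB565 */
fmt = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888 */
fmt = drm_mode_legacy_fb_format(32, 30); /* DRM_FORMAT_XRGB2101010 */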
 */ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { @@ -140,6 +139,35 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, } EXPORT_SYMBOL(drm_driver_legacy_fb_format); +/** + * drm_driver_color_mode_format - Compute DRM 4CC code from color mode + * @dev: DRM device + * @color_mode: command-line color mode + * + * Computes a DRM 4CC pixel format code for the given color mode using + * drm_driver_legacy_fb_format(). The color mode is in the format used on the + * kernel command line. It specifies the number of bits per pixel + * and color depth in a single value. + * + * Useful in fbdev emulation code, since that deals in those values. The + * helper does not consider YUV or other complicated formats. This means + * only legacy formats are supported (fmt->depth is a legacy field), but + * the framebuffer emulation can only deal with such formats, specifically + * RGB/BGR formats. + */ +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode) +{ + switch (color_mode) { + case 15: + return drm_driver_legacy_fb_format(dev, 16, 15); + case 32: + return drm_driver_legacy_fb_format(dev, 32, 24); + default: + return drm_driver_legacy_fb_format(dev, color_mode, color_mode); + } +} +EXPORT_SYMBOL(drm_driver_color_mode_format); + /* * Internal function to query information for a given format. See * drm_format_info() for the public API. diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 888aadb6a4ac..47e6e8577b62 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -99,6 +99,7 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y, return 0; } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_check_src_coords); /** * drm_mode_addfb - add an FB to the graphics configuration @@ -838,6 +839,7 @@ void drm_framebuffer_free(struct kref *kref) fb->funcs->destroy(fb); } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free); /** * drm_framebuffer_init - initialize a framebuffer diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 149b8e25da5b..ee811764c3df 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -114,22 +114,32 @@ drm_gem_init(struct drm_device *dev) } /** - * drm_gem_object_init - initialize an allocated shmem-backed GEM object + * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM + * object in a given shmfs mountpoint + * + * @dev: drm_device the object should be initialized for + * @obj: drm_gem_object to initialize + * @size: object size + * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use + * the usual tmpfs mountpoint (`shm_mnt`). + * + * Initialize an already allocated GEM object of the specified size with + * shmfs backing store.
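Returning to drm_driver_color_mode_format() above: the color mode is the single-number form used on the kernel command line (e.g. video=1024x768-32), and only 15 and 32 need a depth that differs from the bpp. Illustrative calls:

u32 f15 = drm_driver_color_mode_format(dev, 15); /* 16 bpp, depth 15 -> XRGB1555 */
u32 f16 = drm_driver_color_mode_format(dev, 16); /* pass-through -> RGB565 */
u32 f32 = drm_driver_color_mode_format(dev, 32); /* 32 bpp, depth 24 -> XRGB8888 */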
*/ -int drm_gem_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size) +int drm_gem_object_init_with_mnt(struct drm_device *dev, + struct drm_gem_object *obj, size_t size, + struct vfsmount *gemfs) { struct file *filp; drm_gem_private_object_init(dev, obj, size); - filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (gemfs) + filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size, + VM_NORESERVE); + else + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); + if (IS_ERR(filp)) return PTR_ERR(filp); @@ -137,6 +147,22 @@ int drm_gem_object_init(struct drm_device *dev, return 0; } +EXPORT_SYMBOL(drm_gem_object_init_with_mnt); + +/** + * drm_gem_object_init - initialize an allocated shmem-backed GEM object + * @dev: drm_device the object should be initialized for + * @obj: drm_gem_object to initialize + * @size: object size + * + * Initialize an already allocated GEM object of the specified size with + * shmfs backing store. + */ +int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, + size_t size) +{ + return drm_gem_object_init_with_mnt(dev, obj, size, NULL); +} EXPORT_SYMBOL(drm_gem_object_init); /** diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 53c003983ad1..8508060a1a95 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -49,7 +49,8 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { }; static struct drm_gem_shmem_object * -__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, + struct vfsmount *gemfs) { struct drm_gem_shmem_object *shmem; struct drm_gem_object *obj; @@ -76,7 +77,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) drm_gem_private_object_init(dev, obj, size); shmem->map_wc = false; /* dma-buf mappings use always writecombine */ } else { - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); } if (ret) { drm_gem_private_object_fini(obj); @@ -123,11 +124,32 @@ err_free: */ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) { - return __drm_gem_shmem_create(dev, size, false); + return __drm_gem_shmem_create(dev, size, false, NULL); } EXPORT_SYMBOL_GPL(drm_gem_shmem_create); /** + * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a + * given mountpoint + * @dev: DRM device + * @size: Size of the object to allocate + * @gemfs: tmpfs mount where the GEM object will be created + * + * This function creates a shmem GEM object in a given tmpfs mountpoint. + * + * Returns: + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative + * error code on failure. 
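A driver-private mount is useful to request tmpfs features that the default shm_mnt does not enable, for instance transparent huge pages on GPUs that can map pages larger than 4 KiB. A hedged sketch of how a driver might create such a mount (error handling trimmed; "huge=within_size" is a tmpfs mount option, not DRM API):

#include <linux/fs.h>
#include <linux/mount.h>

static struct vfsmount *foo_gemfs_create(void)
{
	char opts[] = "huge=within_size"; /* let tmpfs back objects with THPs */
	struct file_system_type *type = get_fs_type("tmpfs");

	if (!type)
		return NULL;

	return vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts);
}

/* objects then land on that mount via drm_gem_shmem_create_with_mnt(dev, size, gemfs) */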
+ */ +struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, + size_t size, + struct vfsmount *gemfs) +{ + return __drm_gem_shmem_create(dev, size, false, gemfs); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); + +/** * drm_gem_shmem_free - Free resources associated with a shmem GEM object * @shmem: shmem GEM object to free * @@ -765,7 +787,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, size_t size = PAGE_ALIGN(attach->dmabuf->size); struct drm_gem_shmem_object *shmem; - shmem = __drm_gem_shmem_create(dev, size, true); + shmem = __drm_gem_shmem_create(dev, size, true, NULL); if (IS_ERR(shmem)) return ERR_CAST(shmem); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 6027584406af..22b1fe9c03b8 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -16,7 +16,6 @@ #include <drm/drm_mode.h> #include <drm/drm_plane.h> #include <drm/drm_prime.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_tt.h> @@ -687,50 +686,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); /* - * Helpers for struct drm_simple_display_pipe_funcs - */ - -/** - * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct - * drm_simple_display_pipe_funcs.prepare_fb - * @pipe: a simple display pipe - * @new_state: the plane's new state - * - * During plane updates, this function pins the GEM VRAM - * objects of the plane's new framebuffer to VRAM. Call - * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. - * - * Returns: - * 0 on success, or - * a negative errno code otherwise. - */ -int drm_gem_vram_simple_display_pipe_prepare_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state) -{ - return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); - -/** - * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct - * drm_simple_display_pipe_funcs.cleanup_fb - * @pipe: a simple display pipe - * @old_state: the plane's old state - * - * During plane updates, this function unpins the GEM VRAM - * objects of the plane's old framebuffer from VRAM. Complements - * drm_gem_vram_simple_display_pipe_prepare_fb(). 
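The simple-pipe wrappers deleted above only forwarded to the plane helpers that remain; drivers hook those up on the primary plane directly, e.g. through the convenience macro (assumed to fill in both callbacks):

static const struct drm_plane_helper_funcs foo_primary_plane_helper_funcs = {
	DRM_GEM_VRAM_PLANE_HELPER_FUNCS, /* .prepare_fb/.cleanup_fb pin and unpin the BO in VRAM */
	.atomic_check = foo_primary_plane_atomic_check,
	.atomic_update = foo_primary_plane_atomic_update,
};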
- */ -void drm_gem_vram_simple_display_pipe_cleanup_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); - -/* * PRIME helpers */ diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e..b2b6a8e49dda 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -48,6 +48,14 @@ struct drm_prime_file_private; struct drm_printer; struct drm_vblank_crtc; +/* drm_client_event.c */ +#if defined(CONFIG_DRM_CLIENT) +void drm_client_debugfs_init(struct drm_device *dev); +#else +static inline void drm_client_debugfs_init(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 51f39912866f..f593dc569d31 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -540,6 +540,55 @@ int drm_version(struct drm_device *dev, void *data, return err; } +/* + * Check if the passed string contains control char or spaces or + * anything that would mess up a formatted output. + */ +static int drm_validate_value_string(const char *value, size_t len) +{ + int i; + + for (i = 0; i < len; i++) { + if (!isascii(value[i]) || !isgraph(value[i])) + return -EINVAL; + } + return 0; +} + +static int drm_set_client_name(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_set_client_name *name = data; + size_t len = name->name_len; + void __user *user_ptr; + char *new_name; + + if (len > DRM_CLIENT_NAME_MAX_LEN) { + return -EINVAL; + } else if (len) { + user_ptr = u64_to_user_ptr(name->name); + + new_name = memdup_user_nul(user_ptr, len); + if (IS_ERR(new_name)) + return PTR_ERR(new_name); + + if (strlen(new_name) != len || + drm_validate_value_string(new_name, len) < 0) { + kfree(new_name); + return -EINVAL; + } + } else { + new_name = NULL; + } + + mutex_lock(&file_priv->client_name_lock); + kfree(file_priv->client_name); + file_priv->client_name = new_name; + mutex_unlock(&file_priv->client_name_lock); + + return 0; +} + static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { /* ROOT_ONLY is only for CAP_SYS_ADMIN */ @@ -610,6 +659,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_NAME, drm_set_client_name, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER), diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2bc3973d35a1..5e5c5f84daac 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1521,6 +1521,22 @@ void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx, EXPORT_SYMBOL(mipi_dsi_compression_mode_ext_multi); /** + * mipi_dsi_compression_mode_multi() - enable/disable DSC on the peripheral + * @ctx: Context for multiple DSI transactions + * @enable: Whether to enable or disable the DSC + * + * Enable or disable Display Stream Compression on the peripheral using the + * default Picture Parameter 
Set and VESA DSC 1.1 algorithm. + */ +void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx, + bool enable) +{ + return mipi_dsi_compression_mode_ext_multi(ctx, enable, + MIPI_DSI_COMPRESSION_DSC, 0); +} +EXPORT_SYMBOL(mipi_dsi_compression_mode_multi); + +/** * mipi_dsi_dcs_nop_multi() - send DCS NOP packet * @ctx: Context for multiple DSI transactions * diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 5ace481c1901..ca254611b382 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { } INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, - START, LAST, static inline, drm_mm_interval_tree) + START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) @@ -611,7 +611,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, } EXPORT_SYMBOL(drm_mm_insert_node_in_range); -static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) +static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); } diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index df4cc0e8e263..e943205a2394 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -81,6 +81,7 @@ int drm_mode_object_add(struct drm_device *dev, { return __drm_mode_object_add(dev, obj, obj_type, true, NULL); } +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_mode_object_add); void drm_mode_object_register(struct drm_device *dev, struct drm_mode_object *obj) diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 2c582020cb42..5565464c1734 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -21,7 +21,7 @@ */ #include <drm/drm_atomic_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> @@ -185,7 +185,7 @@ EXPORT_SYMBOL(drm_crtc_init); * Zero on success, negative error code on error. * * See also: - * drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked(). + * drm_kms_helper_poll_disable() and drm_client_dev_suspend(). */ int drm_mode_config_helper_suspend(struct drm_device *dev) { @@ -199,10 +199,11 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + drm_client_dev_suspend(dev, false); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it was never initialized */ @@ -230,7 +231,7 @@ EXPORT_SYMBOL(drm_mode_config_helper_suspend); * Zero on success, negative error code on error. * * See also: - * drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable(). + * drm_client_dev_resume() and drm_kms_helper_poll_enable(). 
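With the fb-helper suspend calls swapped for the client-event entry points, the driver-facing contract is unchanged; a typical system-sleep hookup still looks like this (sketch):

static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);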
 */ int drm_mode_config_helper_resume(struct drm_device *dev) { @@ -247,7 +248,8 @@ DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + drm_client_dev_resume(dev, false); + /* * Don't enable polling if it is not initialized */ diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 177b600895d3..5c2abc9eca9c 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -341,8 +341,23 @@ static int drm_of_lvds_get_remote_pixels_type( return pixels_type; } +static int __drm_of_lvds_get_dual_link_pixel_order(int p1_pt, int p2_pt) +{ + /* + * A valid dual-LVDS bus is found when one port is marked with + * "dual-lvds-even-pixels", and the other port is marked with + * "dual-lvds-odd-pixels", bail out if the markers are not right. + */ + if (p1_pt + p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return -EINVAL; + + return p1_pt == DRM_OF_LVDS_EVEN ? + DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; +} + /** - * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order + * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link source pixel order * @port1: First DT port node of the Dual-link LVDS source * @port2: Second DT port node of the Dual-link LVDS source * @@ -387,19 +402,58 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, if (remote_p2_pt < 0) return remote_p2_pt; - /* - * A valid dual-lVDS bus is found when one remote port is marked with - * "dual-lvds-even-pixels", and the other remote port is marked with - * "dual-lvds-odd-pixels", bail out if the markers are not right. - */ - if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return __drm_of_lvds_get_dual_link_pixel_order(remote_p1_pt, remote_p2_pt); +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); + +/** + * drm_of_lvds_get_dual_link_pixel_order_sink - Get LVDS dual-link sink pixel order + * @port1: First DT port node of the Dual-link LVDS sink + * @port2: Second DT port node of the Dual-link LVDS sink + * + * An LVDS dual-link connection is made of two links, with even pixels + * travelling on one link, and odd pixels on the other link. This function + * returns, for two ports of an LVDS dual-link sink, which port shall receive + * the even and odd pixels, based on the requirements of the sink. + * + * The pixel order is determined from the dual-lvds-even-pixels and + * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those + * properties are not present, or if their usage is not valid, this function + * returns -EINVAL. + * + * If either port is not connected, this function returns -EPIPE. + * + * @port1 and @port2 are typically DT sibling nodes, but may have different + * parents when, for instance, two separate LVDS decoders receive the even and + * odd pixels. + * + * Return: + * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 receives even pixels and @port2 + * receives odd pixels + * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 receives odd pixels and @port2 + * receives even pixels + * * -EINVAL - @port1 or @port2 are NULL + * * -EPIPE - when @port1 or @port2 are not connected + */ +int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2) +{ + int sink_p1_pt, sink_p2_pt; + + if (!port1 || !port2) return -EINVAL; - return remote_p1_pt == DRM_OF_LVDS_EVEN ?
- DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : - DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; + sink_p1_pt = drm_of_lvds_get_port_pixels_type(port1); + if (!sink_p1_pt) + return -EPIPE; + + sink_p2_pt = drm_of_lvds_get_port_pixels_type(port2); + if (!sink_p2_pt) + return -EPIPE; + + return __drm_of_lvds_get_dual_link_pixel_order(sink_p1_pt, sink_p2_pt); } -EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order_sink); /** * drm_of_lvds_get_data_mapping - Get LVDS data mapping @@ -410,7 +464,9 @@ EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); * Return: * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18" * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA - data-mapping is "jeida-30" * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24" + * * MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG - data-mapping is "vesa-30" * * -EINVAL - the "data-mapping" property is unsupported * * -ENODEV - the "data-mapping" property is missing */ @@ -427,8 +483,12 @@ int drm_of_lvds_get_data_mapping(const struct device_node *port) return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; if (!strcmp(mapping, "jeida-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + if (!strcmp(mapping, "jeida-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA; if (!strcmp(mapping, "vesa-24")) return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + if (!strcmp(mapping, "vesa-30")) + return MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG; return -EINVAL; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 2d84d7ea1ab7..4a73821b81f6 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -184,6 +184,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO AYANEO 2 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* AYA NEO 2021 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"), @@ -196,6 +202,18 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), }, .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* AYA NEO Founder */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO GEEK */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* AYA NEO NEXT */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 74412b7bf936..0a9ecc1380d2 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -209,6 +209,14 @@ static u32 convert_xrgb8888_to_argb2101010(u32 pix) return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); } +static u32 convert_xrgb8888_to_abgr2101010(u32 pix) +{ + pix = ((pix & 0x00FF0000) >> 14) | + ((pix & 0x0000FF00) << 4) | + ((pix & 0x000000FF) << 22); + return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); +} + /* * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format * @color: input color, in xrgb8888 format @@ 
-242,6 +250,8 @@ static u32 convert_from_xrgb8888(u32 color, u32 format) return convert_xrgb8888_to_xrgb2101010(color); case DRM_FORMAT_ARGB2101010: return convert_xrgb8888_to_argb2101010(color); + case DRM_FORMAT_ABGR2101010: + return convert_xrgb8888_to_abgr2101010(color); default: WARN_ONCE(1, "Can't convert to %p4cc\n", &format); return 0; diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index 0081190201a7..08cfea04e22b 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -235,6 +235,20 @@ void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf) } EXPORT_SYMBOL(__drm_printfn_err); +void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf) +{ + unsigned int counter = ++p->line.counter; + const char *prefix = p->prefix ?: ""; + const char *pad = p->prefix ? " " : ""; + + if (p->line.series) + drm_printf(p->arg, "%s%s%u.%u: %pV", + prefix, pad, p->line.series, counter, vaf); + else + drm_printf(p->arg, "%s%s%u: %pV", prefix, pad, counter, vaf); +} +EXPORT_SYMBOL(__drm_printfn_line); + /** * drm_puts - print a const string to a &drm_printer stream * @p: the &drm printer diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 92f21764246f..96b266b37ba4 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -33,7 +33,7 @@ #include <linux/moduleparam.h> #include <drm/drm_bridge.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 8e3d2d7060f8..4f2ab8a7b50f 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -712,16 +712,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, int fd, u32 *handle) { struct drm_syncobj *syncobj; - struct fd f = fdget(fd); + CLASS(fd, f)(fd); int ret; - if (!fd_file(f)) + if (fd_empty(f)) return -EINVAL; - if (fd_file(f)->f_op != &drm_syncobj_file_fops) { - fdput(f); + if (fd_file(f)->f_op != &drm_syncobj_file_fops) return -EINVAL; - } /* take a reference to put in the idr */ syncobj = fd_file(f)->private_data; @@ -739,7 +737,6 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, } else drm_syncobj_put(syncobj); - fdput(f); return ret; } diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index a031c335bdb9..33a3c98a962d 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -100,15 +100,9 @@ drm_writeback_fence_get_timeline_name(struct dma_fence *fence) return wb_connector->timeline_name; } -static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence) -{ - return true; -} - static const struct dma_fence_ops drm_writeback_fence_ops = { .get_driver_name = drm_writeback_fence_get_driver_name, .get_timeline_name = drm_writeback_fence_get_timeline_name, - .enable_signaling = drm_writeback_fence_enable_signaling, }; static int create_writeback_properties(struct drm_device *dev) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 384df1659be6..b13a17276d07 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, } else { CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | - VIVS_GL_FLUSH_CACHE_COLOR); + 
VIVS_GL_FLUSH_CACHE_COLOR | + VIVS_GL_FLUSH_CACHE_SHADER_L1); if (has_blt) { CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c index 721d633aece9..7aa5f14d0c87 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c @@ -5,8 +5,6 @@ #include <linux/dma-mapping.h> -#include <drm/drm_mm.h> - #include "etnaviv_cmdbuf.h" #include "etnaviv_gem.h" #include "etnaviv_gpu.h" @@ -55,6 +53,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev) return suballoc; free_suballoc: + mutex_destroy(&suballoc->lock); kfree(suballoc); return ERR_PTR(ret); @@ -79,6 +78,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc) { dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr); + mutex_destroy(&suballoc->lock); kfree(suballoc); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 6500f3999c5f..9b4e2f4b1bc7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -538,6 +538,16 @@ static int etnaviv_bind(struct device *dev) priv->num_gpus = 0; priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + /* + * If the GPU is part of a system with DMA addressing limitations, + * request pages for our SHM backend buffers from the DMA32 zone to + * hopefully avoid performance killing SWIOTLB bounce buffering. + */ + if (dma_addressing_limited(dev)) { + priv->shm_gfp_mask |= GFP_DMA32; + priv->shm_gfp_mask &= ~__GFP_HIGHMEM; + } + priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); if (IS_ERR(priv->cmdbuf_suballoc)) { dev_err(drm->dev, "Failed to create cmdbuf suballocator\n"); @@ -564,6 +574,7 @@ out_unbind: out_destroy_suballoc: etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); out_free_priv: + mutex_destroy(&priv->gem_lock); kfree(priv); out_put: drm_dev_put(drm); @@ -608,7 +619,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!of_device_is_available(core_node)) continue; - drm_of_component_match_add(&pdev->dev, &match, + drm_of_component_match_add(dev, &match, component_compare_of, core_node); } } else { @@ -631,9 +642,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) * bit to make sure we are allocating the command buffers and * TLBs in the lower 4 GiB address space. 
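Split masks like the ones that follow are the usual pattern when a GPU can stream data from above 4 GiB through its MMU while the registers holding coherent-buffer addresses (command buffers, TLBs) are only 32 bits wide; the GFP_DMA32 tweak earlier in this file handles the same limitation for the shmem-backed buffers. Annotated form:

/* streaming DMA through the GPU MMU: 40-bit capable */
if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
    /* coherent allocations must stay below 4 GiB */
    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
	return -ENODEV;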
*/ - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { - dev_dbg(&pdev->dev, "No suitable DMA available\n"); + if (dma_set_mask(dev, DMA_BIT_MASK(40)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + dev_dbg(dev, "No suitable DMA available\n"); return -ENODEV; } @@ -644,7 +655,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) */ first_node = etnaviv_of_first_available_node(); if (first_node) { - of_dma_configure(&pdev->dev, first_node, true); + of_dma_configure(dev, first_node, true); of_node_put(first_node); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5c0c9d4e3be1..16473c371444 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -514,6 +514,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) etnaviv_obj->ops->release(etnaviv_obj); drm_gem_object_release(obj); + mutex_destroy(&etnaviv_obj->lock); kfree(etnaviv_obj); } @@ -543,7 +544,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .vm_ops = &vm_ops, }; -static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj) { struct etnaviv_gem_object *etnaviv_obj; @@ -570,6 +571,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, if (!etnaviv_obj) return -ENOMEM; + etnaviv_obj->size = ALIGN(size, SZ_4K); etnaviv_obj->flags = flags; etnaviv_obj->ops = ops; @@ -590,15 +592,13 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *obj = NULL; int ret; - size = PAGE_ALIGN(size); - - ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj); if (ret) goto fail; lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class); - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size)); if (ret) goto fail; @@ -627,7 +627,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, struct drm_gem_object *obj; int ret; - ret = etnaviv_gem_new_impl(dev, flags, ops, &obj); + ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj); if (ret) return ret; @@ -686,7 +686,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) kfree(etnaviv_obj->sgt); } if (etnaviv_obj->pages) { - int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT; unpin_user_pages(etnaviv_obj->pages, npages); kvfree(etnaviv_obj->pages); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index a42d260cac2c..687555aae807 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -36,6 +36,11 @@ struct etnaviv_gem_object { const struct etnaviv_gem_ops *ops; struct mutex lock; + /* + * The actual size that is visible to the GPU, not necessarily + * PAGE_SIZE aligned, but should be aligned to GPU page size. 
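The distinction matters once the kernel PAGE_SIZE exceeds the GPU's 4 KiB page: the CPU-side backing store rounds up to PAGE_SIZE while the GPU mapping only spans the 4 KiB-aligned length. A worked example with illustrative numbers:

/* userspace asks for a 17 KiB buffer on a 64 KiB PAGE_SIZE kernel */
etnaviv_obj->size      = ALIGN(17 * SZ_1K, SZ_4K);  /* 20 KiB visible to the GPU */
etnaviv_obj->base.size = PAGE_ALIGN(17 * SZ_1K);    /* 64 KiB of shmem backing */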
+ */ + u32 size; u32 flags; struct list_head gem_node; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 3524b5811682..6b98200068e4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -17,7 +17,7 @@ static struct lock_class_key etnaviv_prime_lock_class; struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - int npages = obj->size >> PAGE_SHIFT; + unsigned int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ return ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 3d0f8d182506..3c0a5c3e0e3d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -6,7 +6,6 @@ #include <drm/drm_file.h> #include <linux/dma-fence-array.h> #include <linux/file.h> -#include <linux/pm_runtime.h> #include <linux/dma-resv.h> #include <linux/sync_file.h> #include <linux/uaccess.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 7c7f97793ddd..c7d59c06ccd1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -574,8 +574,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) continue; } - /* disable debug registers, as they are not normally needed */ - control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; + /* enable debug register access */ + control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); failed = false; @@ -839,17 +839,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (ret) goto fail; - /* - * If the GPU is part of a system with DMA addressing limitations, - * request pages for our SHM backend buffers from the DMA32 zone to - * hopefully avoid performance killing SWIOTLB bounce buffering. 
- */ - if (dma_addressing_limited(gpu->dev)) - priv->shm_gfp_mask |= GFP_DMA32; - /* Create buffer: */ - ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, - PAGE_SIZE); + ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K); if (ret) { dev_err(gpu->dev, "could not create command buffer\n"); goto fail; @@ -1330,17 +1321,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, { u32 val; + mutex_lock(&gpu->lock); + /* disable clock gating */ val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); - /* enable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); + + mutex_unlock(&gpu->lock); } static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, @@ -1350,23 +1340,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, unsigned int i; u32 val; + mutex_lock(&gpu->lock); + sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); + /* enable clock gating */ + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); + val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); + + mutex_unlock(&gpu->lock); + for (i = 0; i < submit->nr_pmrs; i++) { const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; *pmr->bo_vma = pmr->sequence; } - - /* disable debug register */ - val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); - val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); - - /* enable clock gating */ - val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); - val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); } @@ -1862,7 +1851,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (!gpu) return -ENOMEM; - gpu->dev = &pdev->dev; + gpu->dev = dev; mutex_init(&gpu->lock); mutex_init(&gpu->sched_lock); @@ -1876,8 +1865,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) if (gpu->irq < 0) return gpu->irq; - err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, - dev_name(gpu->dev), gpu); + err = devm_request_irq(dev, gpu->irq, irq_handler, 0, + dev_name(dev), gpu); if (err) { dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); return err; @@ -1914,13 +1903,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) * autosuspend delay is rather arbitary: no measurements have * yet been performed to determine an appropriate value. 
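For context, the autosuspend setup that follows pairs with a mark-busy/put on the completion path: the GPU stays powered for 200 ms after the last reference drops, and only then does the runtime-suspend callback run. The consumer side typically looks like this (sketch):

/* e.g. when the last outstanding job retires */
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);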
*/ - pm_runtime_use_autosuspend(gpu->dev); - pm_runtime_set_autosuspend_delay(gpu->dev, 200); - pm_runtime_enable(gpu->dev); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 200); + pm_runtime_enable(dev); - err = component_add(&pdev->dev, &gpu_ops); + err = component_add(dev, &gpu_ops); if (err < 0) { - dev_err(&pdev->dev, "failed to register component: %d\n", err); + dev_err(dev, "failed to register component: %d\n", err); return err; } @@ -1929,8 +1918,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) static void etnaviv_gpu_platform_remove(struct platform_device *pdev) { + struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev); + component_del(&pdev->dev, &gpu_ops); pm_runtime_disable(&pdev->dev); + + mutex_destroy(&gpu->lock); + mutex_destroy(&gpu->sched_lock); } static int etnaviv_gpu_rpm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 31322195b9e4..4d8a7d48ade3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -144,6 +144,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_primid; u32 hangcheck_fence; void __iomem *mmio; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 1661d589bf3e..7e065b3723cf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context, return ret; } -static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, +static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, + u32 iova, unsigned int va_len, struct sg_table *sgt, int prot) -{ struct scatterlist *sg; +{ + struct scatterlist *sg; unsigned int da = iova; unsigned int i; int ret; @@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, for_each_sgtable_dma_sg(sgt, sg, i) { phys_addr_t pa = sg_dma_address(sg) - sg->offset; - size_t bytes = sg_dma_len(sg) + sg->offset; + unsigned int da_len = sg_dma_len(sg) + sg->offset; + unsigned int bytes = min_t(unsigned int, da_len, va_len); - VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); + VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) goto fail; + va_len -= bytes; da += bytes; } @@ -104,21 +108,7 @@ fail: static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len) { - struct scatterlist *sg; - unsigned int da = iova; - int i; - - for_each_sgtable_dma_sg(sgt, sg, i) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - - VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); - - BUG_ON(!PAGE_ALIGNED(bytes)); - - da += bytes; - } + etnaviv_context_unmap(context, iova, len); context->flush_seq++; } @@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, lockdep_assert_held(&context->lock); etnaviv_iommu_unmap(context, mapping->vram_node.start, - etnaviv_obj->sgt, etnaviv_obj->base.size); + etnaviv_obj->sgt, etnaviv_obj->size); drm_mm_remove_node(&mapping->vram_node); } @@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, node = &mapping->vram_node; if (va) - ret = etnaviv_iommu_insert_exact(context, node, - etnaviv_obj->base.size, va); + ret = etnaviv_iommu_insert_exact(context, node, 
etnaviv_obj->size, va); else - ret = etnaviv_iommu_find_iova(context, node, - etnaviv_obj->base.size); + ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size); if (ret < 0) goto unlock; mapping->iova = node->start; - ret = etnaviv_iommu_map(context, node->start, sgt, + ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt, ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE); if (ret < 0) { @@ -358,7 +346,7 @@ static void etnaviv_iommu_context_free(struct kref *kref) container_of(kref, struct etnaviv_iommu_context, refcount); etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping); - + mutex_destroy(&context->lock); context->global->ops->free(context); } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index c01a147f0dfd..7f8ac0178547 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -61,7 +61,6 @@ struct etnaviv_iommu_global { /* P(age) T(able) A(rray) */ u64 *pta_cpu; dma_addr_t pta_dma; - struct spinlock pta_lock; DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES); } v2; }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index dc9dea664a28..d53a5c293373 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -62,6 +62,8 @@ static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += perf_reg_read(gpu, domain, signal); @@ -81,6 +83,8 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu, u32 value = 0; unsigned i; + lockdep_assert_held(&gpu->lock); + for (i = 0; i < gpu->identity.pixel_pipes; i++) { pipe_select(gpu, clock, i); value += gpu_read(gpu, signal->data); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index ab9ca4824b62..5b67eda122db 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -11,6 +11,7 @@ #include "etnaviv_gpu.h" #include "etnaviv_sched.h" #include "state.xml.h" +#include "state_hi.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -35,7 +36,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; - u32 dma_addr; + u32 dma_addr, primid = 0; int change; /* @@ -52,10 +53,22 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; + if (submit->exec_state == ETNA_PIPE_3D) { + /* guard against concurrent usage from perfmon_sample */ + mutex_lock(&gpu->lock); + gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0, + VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM << + VIVS_MC_PROFILE_CONFIG0_FE__SHIFT); + primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ); + mutex_unlock(&gpu->lock); + } if (gpu->state == ETNA_GPU_STATE_RUNNING && (gpu->completed_fence != gpu->hangcheck_fence || - change < 0 || change > 16)) { + change < 0 || change > 16 || + (submit->exec_state == ETNA_PIPE_3D && + gpu->hangcheck_primid != primid))) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_primid = primid; gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } @@ -72,7 +85,7 
@@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job drm_sched_resubmit_jobs(&gpu->sched); - drm_sched_start(&gpu->sched); + drm_sched_start(&gpu->sched, 0); return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 829bc528e618..f7bc5f6e20ff 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state.xml ( 29355 bytes, from 2024-01-19 10:18:54) -- common.xml ( 35664 bytes, from 2023-12-06 10:55:32) -- common_3d.xml ( 15069 bytes, from 2023-11-22 10:05:24) -- state_hi.xml ( 35854 bytes, from 2023-12-11 15:50:17) -- copyright.xml ( 1597 bytes, from 2016-11-10 13:58:32) -- state_2d.xml ( 52271 bytes, from 2023-06-02 12:35:03) -- state_3d.xml ( 89522 bytes, from 2024-01-19 10:18:54) -- state_blt.xml ( 14592 bytes, from 2023-11-22 10:05:09) -- state_vg.xml ( 5975 bytes, from 2016-11-10 13:58:32) - -Copyright (C) 2012-2023 by the following authors: +- state.xml ( 30729 bytes, from 2024-06-21 11:31:54) +- common.xml ( 35664 bytes, from 2023-12-13 09:33:18) +- common_3d.xml ( 15069 bytes, from 2023-12-13 09:33:18) +- state_hi.xml ( 35909 bytes, from 2024-06-21 11:31:54) +- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03) +- state_2d.xml ( 52271 bytes, from 2023-05-30 20:50:02) +- state_3d.xml ( 89626 bytes, from 2024-06-21 11:32:57) +- state_blt.xml ( 14592 bytes, from 2023-12-13 09:33:18) +- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03) + +Copyright (C) 2012-2024 by the following authors: - Wladimir J. van der Laan <laanwj@gmail.com> - Christian Gmeiner <christian.gmeiner@gmail.com> - Lucas Stach <l.stach@pengutronix.de> @@ -467,6 +467,7 @@ DEALINGS IN THE SOFTWARE. 
#define VIVS_MC_PROFILE_CONFIG0 0x00000470 #define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff #define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM 0x00000009 #define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a #define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b #define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 733b109a5095..0d13828e7d9e 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -4,6 +4,7 @@ config DRM_EXYNOS depends on OF && DRM && COMMON_CLK depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on MMU + select DRM_CLIENT_SELECTION select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP select DRM_KMS_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0d185c0564b9..c65364087fac 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,6 +37,24 @@ #define WINDOWS_NR 2 +struct decon_data { + unsigned int vidw_buf_start_base; + unsigned int shadowcon_win_protect_shift; + unsigned int wincon_burstlen_shift; +}; + +static struct decon_data exynos7_decon_data = { + .vidw_buf_start_base = 0x80, + .shadowcon_win_protect_shift = 10, + .wincon_burstlen_shift = 11, +}; + +static struct decon_data exynos7870_decon_data = { + .vidw_buf_start_base = 0x880, + .shadowcon_win_protect_shift = 8, + .wincon_burstlen_shift = 10, +}; + struct decon_context { struct device *dev; struct drm_device *drm_dev; @@ -55,11 +73,19 @@ struct decon_context { wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; + const struct decon_data *data; struct drm_encoder *encoder; }; static const struct of_device_id decon_driver_dt_match[] = { - {.compatible = "samsung,exynos7-decon"}, + { + .compatible = "samsung,exynos7-decon", + .data = &exynos7_decon_data, + }, + { + .compatible = "samsung,exynos7870-decon", + .data = &exynos7870_decon_data, + }, {}, }; MODULE_DEVICE_TABLE(of, decon_driver_dt_match); @@ -81,10 +107,31 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = { DRM_PLANE_TYPE_CURSOR, }; -static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) +/** + * decon_shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @ctx: display and enhancement controller context + * @win: window to protect registers for + * @protect: 1 to protect (disable updates) + */ +static void decon_shadow_protect_win(struct decon_context *ctx, + unsigned int win, bool protect) { - struct decon_context *ctx = crtc->ctx; + u32 bits, val; + unsigned int shift = ctx->data->shadowcon_win_protect_shift; + + bits = SHADOWCON_WINx_PROTECT(shift, win); + + val = readl(ctx->regs + SHADOWCON); + if (protect) + val |= bits; + else + val &= ~bits; + writel(val, ctx->regs + SHADOWCON); +} +static void decon_wait_for_vblank(struct decon_context *ctx) +{ if (ctx->suspended) return; @@ -100,25 +147,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n"); } -static void decon_clear_channels(struct exynos_drm_crtc *crtc) +static void decon_clear_channels(struct decon_context *ctx) { - struct decon_context *ctx = crtc->ctx; unsigned int win, ch_enabled = 0; + u32 val; /* Check if any channel is enabled. 
*/ for (win = 0; win < WINDOWS_NR; win++) { - u32 val = readl(ctx->regs + WINCON(win)); + val = readl(ctx->regs + WINCON(win)); if (val & WINCONx_ENWIN) { + decon_shadow_protect_win(ctx, win, true); + val &= ~WINCONx_ENWIN; writel(val, ctx->regs + WINCON(win)); ch_enabled = 1; + + decon_shadow_protect_win(ctx, win, false); } } + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + /* Wait for vsync, as disable channel takes effect at next vsync */ if (ch_enabled) - decon_wait_for_vblank(ctx->crtc); + decon_wait_for_vblank(ctx); } static int decon_ctx_initialize(struct decon_context *ctx, @@ -126,7 +181,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, { ctx->drm_dev = drm_dev; - decon_clear_channels(ctx->crtc); + decon_clear_channels(ctx); return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } @@ -140,7 +195,7 @@ static void decon_ctx_remove(struct decon_context *ctx) static u32 decon_calc_clkdiv(struct decon_context *ctx, const struct drm_display_mode *mode) { - unsigned long ideal_clk = mode->clock; + unsigned long ideal_clk = mode->clock * 1000; u32 clkdiv; /* Find the clock divider value that gets us closest to ideal_clk */ @@ -263,6 +318,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, { unsigned long val; int padding; + unsigned int shift = ctx->data->wincon_burstlen_shift; val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; @@ -270,44 +326,44 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, switch (fb->format->format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XRGB8888: val |= WINCONx_BPPMODE_24BPP_xRGB; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_XBGR8888: val |= WINCONx_BPPMODE_24BPP_xBGR; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBX8888: val |= WINCONx_BPPMODE_24BPP_RGBx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRX8888: val |= WINCONx_BPPMODE_24BPP_BGRx; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ARGB8888: val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_ABGR8888: val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_RGBA8888: val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; case DRM_FORMAT_BGRA8888: default: val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX | WINCONx_ALPHA_SEL; - val |= WINCONx_BURSTLEN_16WORD; + val |= WINCONx_BURSTLEN_16WORD(shift); break; } @@ -323,8 +379,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width; if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { - val &= ~WINCONx_BURSTLEN_MASK; - val |= WINCONx_BURSTLEN_8WORD; + val &= ~WINCONx_BURSTLEN_MASK(shift); + val |= WINCONx_BURSTLEN_8WORD(shift); } writel(val, ctx->regs + WINCON(win)); @@ -343,28 +399,6 @@ static void decon_win_set_colkey(struct decon_context 
*ctx, unsigned int win) writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); } -/** - * decon_shadow_protect_win() - disable updating values from shadow registers at vsync - * - * @ctx: display and enhancement controller context - * @win: window to protect registers for - * @protect: 1 to protect (disable updates) - */ -static void decon_shadow_protect_win(struct decon_context *ctx, - unsigned int win, bool protect) -{ - u32 bits, val; - - bits = SHADOWCON_WINx_PROTECT(win); - - val = readl(ctx->regs + SHADOWCON); - if (protect) - val |= bits; - else - val &= ~bits; - writel(val, ctx->regs + SHADOWCON); -} - static void decon_atomic_begin(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -391,6 +425,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, unsigned int win = plane->index; unsigned int cpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; + unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base; if (ctx->suspended) return; @@ -407,7 +442,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* buffer start address */ val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0); - writel(val, ctx->regs + VIDW_BUF_START(win)); + writel(val, ctx->regs + VIDW_BUF_START(vidw_addr0_base, win)); padding = (pitch / cpp) - fb->width; @@ -689,6 +724,7 @@ static int decon_probe(struct platform_device *pdev) ctx->dev = dev; ctx->suspended = true; + ctx->data = of_device_get_match_data(dev); i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); if (i80_if_timings) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 0ed4f2b8595a..1815374c38df 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -19,9 +19,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, enum exynos_drm_output_type out_type, const struct exynos_drm_crtc_ops *ops, void *context); -void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc); -void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, - struct exynos_drm_plane *exynos_plane); /* This function gets crtc device matched with out_type. 
*/ struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 7c59e1164a48..2a466d8179f4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> @@ -111,6 +112,7 @@ static const struct drm_driver exynos_drm_driver = { .dumb_create = exynos_drm_gem_dumb_create, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, + EXYNOS_DRM_FBDEV_DRIVER_OPS, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, @@ -288,7 +290,7 @@ static int exynos_drm_bind(struct device *dev) if (ret < 0) goto err_cleanup_poll; - exynos_drm_fbdev_setup(drm); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index a379c8ca435a..9526a25e90ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -23,7 +23,6 @@ #include "exynos_drm_fbdev.h" #define MAX_CONNECTOR 4 -#define PREFERRED_BPP 32 static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { @@ -87,8 +86,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, return 0; } -static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs exynos_drm_fbdev_helper_funcs = { +}; + +int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct exynos_drm_gem *exynos_gem; struct drm_device *dev = helper->dev; @@ -120,6 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, ret = PTR_ERR(helper->fb); goto err_destroy_gem; } + helper->funcs = &exynos_drm_fbdev_helper_funcs; ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem); if (ret < 0) @@ -134,93 +137,3 @@ err_destroy_gem: exynos_drm_gem_destroy(exynos_gem); return ret; } - -static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { - .fb_probe = exynos_drm_fbdev_create, -}; - -/* - * struct drm_client - */ - -static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, 
"Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = exynos_drm_fbdev_client_unregister, - .restore = exynos_drm_fbdev_client_restore, - .hotplug = exynos_drm_fbdev_client_hotplug, -}; - -void exynos_drm_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 1e1dea627cd9..02a9201abea3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -11,12 +11,17 @@ #ifndef _EXYNOS_DRM_FBDEV_H_ #define _EXYNOS_DRM_FBDEV_H_ -#ifdef CONFIG_DRM_FBDEV_EMULATION -void exynos_drm_fbdev_setup(struct drm_device *dev); +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + +#if defined(CONFIG_DRM_FBDEV_EMULATION) +int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, + struct drm_fb_helper_surface_size *sizes); +#define EXYNOS_DRM_FBDEV_DRIVER_OPS \ + .fbdev_probe = exynos_drm_fbdev_driver_fbdev_probe #else -static inline void exynos_drm_fbdev_setup(struct drm_device *dev) -{ -} +#define EXYNOS_DRM_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 59fa22050717..1ae90ef1fc23 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1286,7 +1286,7 @@ static int gsc_probe(struct platform_device *pdev) return ret; } - /* context initailization */ + /* context initialization */ ctx->id = pdev->id; platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 1e26cd4f8347..c9d4b9146df9 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -883,27 +883,32 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = connector_to_hdmi(connector); - struct edid *edid; + const struct drm_display_info *info = &connector->display_info; + const struct drm_edid *drm_edid; int ret; if (!hdata->ddc_adpt) goto no_edid; - edid = drm_get_edid(connector, hdata->ddc_adpt); - if (!edid) + drm_edid = drm_edid_read_ddc(connector, hdata->ddc_adpt); + + ret = drm_edid_connector_update(connector, drm_edid); + if (ret) + return 0; + + cec_notifier_set_phys_addr(hdata->notifier, info->source_physical_address); + + if (!drm_edid) goto no_edid; - hdata->dvi_mode = !connector->display_info.is_hdmi; + hdata->dvi_mode = !info->is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), - edid->width_cm, edid->height_cm); - - drm_connector_update_edid_property(connector, edid); - cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid); + info->width_mm / 10, info->height_mm / 10); - ret = drm_add_edid_modes(connector, edid); + ret = drm_edid_connector_add_modes(connector); - kfree(edid); + drm_edid_free(drm_edid); return ret; diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h index 5bc5f1db5196..216c106dac8f 100644 --- a/drivers/gpu/drm/exynos/regs-decon7.h +++ b/drivers/gpu/drm/exynos/regs-decon7.h @@ -48,7 +48,7 @@ /* SHADOWCON */ #define SHADOWCON 0x30 -#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win))) +#define SHADOWCON_WINx_PROTECT(_shf, _win) (1 << ((_shf) + (_win))) /* WINCONx */ #define WINCON(_win) (0x50 + ((_win) * 4)) @@ -58,10 +58,9 @@ #define WINCONx_BUFSEL_SHIFT 28 #define WINCONx_TRIPLE_BUF_MODE (0x1 << 18) #define WINCONx_DOUBLE_BUF_MODE (0x0 << 18) -#define WINCONx_BURSTLEN_16WORD (0x0 << 11) -#define WINCONx_BURSTLEN_8WORD (0x1 << 11) -#define WINCONx_BURSTLEN_MASK (0x1 << 11) -#define WINCONx_BURSTLEN_SHIFT 11 +#define WINCONx_BURSTLEN_16WORD(_shf) (0x0 << (_shf)) +#define WINCONx_BURSTLEN_8WORD(_shf) (0x1 << (_shf)) +#define WINCONx_BURSTLEN_MASK(_shf) (0x1 << (_shf)) #define WINCONx_BLD_PLANE (0 << 8) #define WINCONx_BLD_PIX (1 << 8) #define WINCONx_ALPHA_MUL (1 << 7) @@ -89,9 +88,9 @@ #define VIDOSD_H(_x) (0x80 + ((_x) * 4)) /* Frame buffer start addresses: VIDWxxADD0n */ -#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10)) -#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10)) -#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10)) +#define VIDW_BUF_START(_base, _win) ((_base) + ((_win) * 0x10)) +#define VIDW_BUF_START1(_base, _win) ((_base) + ((_win) * 0x10)) +#define VIDW_BUF_START2(_base, _win) ((_base) + ((_win) * 0x10)) #define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8)) #define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8)) diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index 5ca71ef87325..0e0f910ceb9f 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -3,11 +3,13 @@ config DRM_FSL_DCU tristate "DRM Support for Freescale DCU" depends on DRM && OF && ARM && COMMON_CLK select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS + select MFD_SYSCON if SOC_LS1021A help Choose this option if you have an Freescale DCU chipset. If M is selected the module will be called fsl-dcu-drm. 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index ab6c0c6cd0e2..91a48d774cf7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -19,6 +19,7 @@ #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -100,12 +101,25 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev) static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + struct regmap *scfg; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); - if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); - return ret; + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to initialize mode setting\n"); + + scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg"); + if (PTR_ERR(scfg) != -ENODEV) { + /* + * For simplicity, enable the PIXCLK unconditionally, + * resulting in increased power consumption. Disabling + * the clock in PM or on unload could be implemented as + * a future improvement. + */ + ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN, + SCFG_PIXCLKCR_PXCEN); + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n"); } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); @@ -156,6 +170,7 @@ static const struct drm_driver fsl_dcu_drm_driver = { .load = fsl_dcu_load, .unload = fsl_dcu_unload, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fsl_dcu_drm_fops, .name = "fsl-dcu-drm", .desc = "Freescale DCU DRM", @@ -272,10 +287,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) } fsl_dev->irq = platform_get_irq(pdev, 0); - if (fsl_dev->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (fsl_dev->irq < 0) return fsl_dev->irq; - } fsl_dev->regmap = devm_regmap_init_mmio(dev, base, &fsl_dcu_regmap_config); @@ -333,7 +346,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) if (ret < 0) goto put; - drm_fbdev_dma_setup(drm, legacyfb_depth); + drm_client_setup_with_color_mode(drm, legacyfb_depth); return 0; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index e2049a0e8a92..566396013c04 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -160,6 +160,9 @@ #define FSL_DCU_ARGB4444 12 #define FSL_DCU_YUV422 14 +#define SCFG_PIXCLKCR 0x28 +#define SCFG_PIXCLKCR_PXCEN BIT(31) + #define VF610_LAYER_REG_NUM 9 #define LS1021A_LAYER_REG_NUM 10 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 9eb5abaf7d66..49bbd00c77ae 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c @@ -29,7 +29,7 @@ void fsl_tcon_bypass_enable(struct fsl_tcon *tcon) FSL_TCON_CTRL1_TCON_BYPASS); } -static struct regmap_config fsl_tcon_regmap_config = { +static const struct regmap_config fsl_tcon_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index efb4a2dd2f80..aa2ea128aa2f 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_GMA500 tristate "Intel GMA500/600/3600/3650 KMS Framebuffer" - depends on DRM && PCI && X86 && MMU + depends on DRM && PCI && X86 && MMU && HAS_IOPORT + select 
DRM_CLIENT_SELECTION select DRM_KMS_HELPER select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION select I2C diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 98b44974d42d..8edefea2ef59 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -143,12 +143,15 @@ static const struct fb_ops psb_fbdev_fb_ops = { .fb_destroy = psb_fbdev_fb_destroy, }; +static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = { +}; + /* - * struct drm_fb_helper_funcs + * struct drm_driver */ -static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fb_helper->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); @@ -206,6 +209,7 @@ static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper, goto err_drm_gem_object_put; } + fb_helper->funcs = &psb_fbdev_fb_helper_funcs; fb_helper->fb = fb; info = drm_fb_helper_alloc_info(fb_helper); @@ -246,93 +250,3 @@ err_drm_gem_object_put: drm_gem_object_put(obj); return ret; } - -static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = { - .fb_probe = psb_fbdev_fb_probe, -}; - -/* - * struct drm_client_funcs and setup code - */ - -static void psb_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_fb_helper_unprepare(fb_helper); - drm_client_release(&fb_helper->client); - kfree(fb_helper); - } -} - -static int psb_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int psb_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs psb_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = psb_fbdev_client_unregister, - .restore = psb_fbdev_client_restore, - .hotplug = psb_fbdev_client_hotplug, -}; - -void psb_fbdev_setup(struct drm_psb_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->dev; - struct drm_fb_helper *fb_helper; - int ret; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs); - - ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_fb_helper_unprepare; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_fb_helper_unprepare: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index d67c2b3ad901..c419ebbc49ec 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ 
b/drivers/gpu/drm/gma500/psb_drv.c @@ -20,6 +20,7 @@ #include <acpi/video.h> #include <drm/drm.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -475,7 +476,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - psb_fbdev_setup(dev_priv); + drm_client_setup(dev, NULL); return 0; } @@ -507,6 +508,7 @@ static const struct drm_driver driver = { .num_ioctls = ARRAY_SIZE(psb_ioctls), .dumb_create = psb_gem_dumb_create, + PSB_FBDEV_DRIVER_OPS, .ioctls = psb_ioctls, .fops = &psb_gem_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index bddf89b82fec..de62cbfcdc72 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -184,6 +184,9 @@ #define KSEL_BYPASS_25 6 #define KSEL_BYPASS_83_100 7 +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -597,10 +600,13 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, /* fbdev */ #if defined(CONFIG_DRM_FBDEV_EMULATION) -void psb_fbdev_setup(struct drm_psb_private *dev_priv); +int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +#define PSB_FBDEV_DRIVER_OPS \ + .fbdev_probe = psb_fbdev_driver_fbdev_probe #else -static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv) -{ } +#define PSB_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif /* backlight.c */ diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig index 9c1e61f9eec3..b4d2136942f0 100644 --- a/drivers/gpu/drm/gud/Kconfig +++ b/drivers/gpu/drm/gud/Kconfig @@ -4,6 +4,7 @@ config DRM_GUD tristate "GUD USB Display" depends on DRM && USB && MMU select LZ4_COMPRESS + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index ac6bbf920c72..09ccdc1dc1a2 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> @@ -376,6 +377,7 @@ static const struct drm_driver gud_drm_driver = { .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gud_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = "gud", .desc = "Generic USB Display", @@ -622,7 +624,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) drm_kms_helper_poll_init(drm); - drm_fbdev_shmem_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 126504318a4f..80253d39664a 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -3,6 +3,7 @@ config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" depends on DRM && PCI && (ARM64 || COMPILE_TEST) depends on MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 9f9b19ea0587..8c488c98ac97 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -11,11 +11,12 @@ 
* Jianhua Li <lijianhua@huawei.com> */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -63,6 +64,7 @@ static const struct drm_driver hibmc_driver = { .debugfs_init = drm_vram_mm_debugfs_init, .dumb_create = hibmc_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, + DRM_FBDEV_TTM_DRIVER_OPS, }; static int __maybe_unused hibmc_pm_suspend(struct device *dev) @@ -306,7 +308,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, hibmc_driver.name); if (ret) return ret; @@ -339,7 +341,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, goto err_unload; } - drm_fbdev_ttm_setup(dev, 32); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index 0772f79567ef..43e8a4fd2d11 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -2,6 +2,7 @@ config DRM_HISI_KIRIN tristate "DRM Support for Hisilicon Kirin series SoCs Platform" depends on DRM && OF && (ARM64 || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 871f79a6b17e..5616c3917c03 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -25,6 +25,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> +#include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> @@ -925,6 +926,7 @@ static const struct drm_driver ade_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ade_fops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "kirin", .desc = "Hisilicon Kirin620 SoC DRM Driver", .date = "20150718", diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 12666985686b..86a3a1faff49 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -18,8 +18,8 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> -#include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -237,7 +237,7 @@ static int kirin_drm_bind(struct device *dev) if (ret) goto err_kms_cleanup; - drm_fbdev_dma_setup(drm_dev, 32); + drm_client_setup(drm_dev, NULL); return 0; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index ff93e08d5036..e0953777a206 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -3,13 +3,14 @@ * Copyright 2021 Microsoft */ +#include <linux/aperture.h> #include <linux/efi.h> #include <linux/hyperv.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include 
<drm/drm_fbdev_shmem.h> #include <drm/drm_gem_shmem_helper.h> @@ -36,6 +37,7 @@ static struct drm_driver hyperv_driver = { .fops = &hv_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static int hyperv_pci_probe(struct pci_dev *pdev, @@ -124,7 +126,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_hv_set_drv_data; } - drm_aperture_remove_framebuffers(&hyperv_driver); + aperture_remove_all_conflicting_devices(hyperv_driver.name); ret = hyperv_setup_vram(hv, hdev); if (ret) @@ -149,7 +151,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_free_mmio; } - drm_fbdev_shmem_setup(dev, 0); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 14ac351fd76d..5e939004b646 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,7 +10,9 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c63fa2133ccb..31710d98cad5 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -51,7 +51,8 @@ i915-y += \ i915-y += \ soc/intel_dram.o \ soc/intel_gmch.o \ - soc/intel_pch.o + soc/intel_pch.o \ + soc/intel_rom.o # core library code i915-y += \ @@ -225,6 +226,7 @@ i915-y += \ display/intel_atomic_plane.o \ display/intel_audio.o \ display/intel_bios.o \ + display/intel_bo.o \ display/intel_bw.o \ display/intel_cdclk.o \ display/intel_color.o \ @@ -242,6 +244,7 @@ i915-y += \ display/intel_display_power_well.o \ display/intel_display_reset.o \ display/intel_display_rps.o \ + display/intel_display_snapshot.o \ display/intel_display_wa.o \ display/intel_dmc.o \ display/intel_dmc_wl.o \ @@ -325,6 +328,7 @@ i915-y += \ display/intel_dp_hdcp.o \ display/intel_dp_link_training.o \ display/intel_dp_mst.o \ + display/intel_dp_test.o \ display/intel_dsi.o \ display/intel_dsi_dcs_backlight.o \ display/intel_dsi_vbt.o \ @@ -335,6 +339,7 @@ i915-y += \ display/intel_lspcon.o \ display/intel_lvds.o \ display/intel_panel.o \ + display/intel_pfit.o \ display/intel_pps.o \ display/intel_qp_tables.o \ display/intel_sdvo.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 526c8c4d7b53..4fbec065d53e 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -19,6 +19,7 @@ #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" +#include "intel_dp_test.h" #include "intel_dpio_phy.h" #include "intel_encoder.h" #include "intel_fifo_underrun.h" @@ -169,13 +170,12 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN; - I915_STATE_WARN(dev_priv, cur_state != state, - "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", - dig_port->base.base.base.id, dig_port->base.base.name, - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", + 
dig_port->base.base.base.id, dig_port->base.base.name, + str_on_off(state), str_on_off(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -184,9 +184,9 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) struct intel_display *display = &dev_priv->display; bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "eDP PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "eDP PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) @@ -477,12 +477,8 @@ intel_dp_link_down(struct intel_encoder *encoder, msleep(intel_dp->pps.panel_power_down_delay); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_wakeref_t wakeref; - - with_intel_pps_lock(intel_dp, wakeref) - intel_dp->pps.active_pipe = INVALID_PIPE; - } + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_pps_port_disable(encoder, old_crtc_state); } static void g4x_dp_audio_enable(struct intel_encoder *encoder, @@ -694,7 +690,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, with_intel_pps_lock(intel_dp, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_pps_init(encoder, pipe_config); + vlv_pps_port_enable_unlocked(encoder, pipe_config); intel_dp_enable_port(intel_dp, pipe_config); @@ -709,8 +705,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, if (IS_CHERRYVIEW(dev_priv)) lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); - vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), - lane_mask); + vlv_wait_port_ready(display, dp_to_dig_port(intel_dp), lane_mask); } intel_dp_set_power(intel_dp, DP_SET_POWER_D0); @@ -1172,12 +1167,8 @@ intel_dp_hotplug(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum intel_hotplug_state state; - if (intel_dp->compliance.test_active && - intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { - intel_dp_phy_test(encoder); - /* just do the PHY test and nothing else */ + if (intel_dp_test_phy(intel_dp)) return INTEL_HOTPLUG_UNCHANGED; - } state = intel_encoder_hotplug(encoder, connector); @@ -1249,20 +1240,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(enc_to_dig_port(to_intel_encoder(encoder))); } -enum pipe vlv_active_pipe(struct intel_dp *intel_dp) -{ - struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - enum pipe pipe; - - if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, - encoder->port, &pipe)) - return pipe; - - return INVALID_PIPE; -} - static void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct intel_display *display = to_intel_display(encoder->dev); @@ -1272,13 +1249,10 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder) intel_dp->DP = intel_de_read(display, intel_dp->output_reg); intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_wakeref_t wakeref; - - with_intel_pps_lock(intel_dp, wakeref) - intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); - } + if (IS_VALLEYVIEW(dev_priv) || 
IS_CHERRYVIEW(dev_priv)) + vlv_pps_pipe_reset(intel_dp); intel_pps_encoder_reset(intel_dp); } diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h index a10638ab749c..c75e64ae79b7 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.h +++ b/drivers/gpu/drm/i915/display/g4x_dp.h @@ -19,7 +19,6 @@ struct intel_encoder; #ifdef I915 const struct dpll *vlv_get_dpll(struct drm_i915_private *i915); -enum pipe vlv_active_pipe(struct intel_dp *intel_dp); void g4x_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, @@ -32,10 +31,6 @@ static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915) { return NULL; } -static inline int vlv_active_pipe(struct intel_dp *intel_dp) -{ - return 0; -} static inline void g4x_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 46f23bdb4c17..d1a7d0d57c6b 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -480,8 +480,8 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); @@ -496,7 +496,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, g4x_hdmi_enable_port(encoder, pipe_config); - vlv_wait_port_ready(dev_priv, dig_port, 0x0); + vlv_wait_port_ready(display, dig_port, 0x0); } static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state, @@ -557,9 +557,8 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); chv_phy_pre_encoder_enable(encoder, pipe_config); @@ -573,7 +572,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, g4x_hdmi_enable_port(encoder, pipe_config); - vlv_wait_port_ready(dev_priv, dig_port, 0x0); + vlv_wait_port_ready(display, dig_port, 0x0); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 611a7d6ef80c..34c5d28fc866 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -3,6 +3,8 @@ * Copyright © 2022 Intel Corporation */ +#include <linux/debugfs.h> + #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" @@ -13,6 +15,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 val; @@ -25,16 +28,16 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * This function is called from post_plane_update, which is run after * a vblank wait. 
*/ - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); val = IPS_ENABLE; - if (i915->display.ips.false_color) + if (display->ips.false_color) val |= IPS_FALSE_COLOR; if (IS_BROADWELL(i915)) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, val | IPS_PCODE_CONTROL)); /* @@ -44,7 +47,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * so we need to just enable it and continue on. */ } else { - intel_de_write(i915, IPS_CTL, val); + intel_de_write(display, IPS_CTL, val); /* * The bit only becomes 1 in the next vblank, so this wait here * is essentially intel_wait_for_vblank. If we don't have this @@ -52,14 +55,15 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50)) - drm_err(&i915->drm, + if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50)) + drm_err(display->drm, "Timed out waiting for IPS enable\n"); } } bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); bool need_vblank_wait = false; @@ -68,19 +72,19 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) return need_vblank_wait; if (IS_BROADWELL(i915)) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ - if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100)) - drm_err(&i915->drm, + if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100)) + drm_err(display->drm, "Timed out waiting for IPS disable\n"); } else { - intel_de_write(i915, IPS_CTL, 0); - intel_de_posting_read(i915, IPS_CTL); + intel_de_write(display, IPS_CTL, 0); + intel_de_posting_read(display, IPS_CTL); } /* We need to wait for a vblank before we can disable the plane. 
*/ @@ -186,6 +190,7 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc) bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -193,7 +198,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) if (!hsw_crtc_supports_ips(crtc)) return false; - if (!i915->display.params.enable_ips) + if (!display->params.enable_ips) return false; if (crtc_state->pipe_bpp > 24) @@ -207,7 +212,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) * Should measure whether using a lower cdclk w/o IPS */ if (IS_BROADWELL(i915) && - crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100) + crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100) return false; return true; @@ -257,6 +262,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, void hsw_ips_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -264,7 +270,7 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state) return; if (IS_HASWELL(i915)) { - crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE; + crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE; } else { /* * We cannot readout IPS state on broadwell, set to @@ -278,9 +284,9 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state) static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) { struct intel_crtc *crtc = data; - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - *val = i915->display.ips.false_color; + *val = display->ips.false_color; return 0; } @@ -288,7 +294,7 @@ static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) static int hsw_ips_debugfs_false_color_set(void *data, u64 val) { struct intel_crtc *crtc = data; - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc_state *crtc_state; int ret; @@ -296,7 +302,7 @@ static int hsw_ips_debugfs_false_color_set(void *data, u64 val) if (ret) return ret; - i915->display.ips.false_color = val; + display->ips.false_color = val; crtc_state = to_intel_crtc_state(crtc->base.state); @@ -323,18 +329,19 @@ DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops, static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_wakeref_t wakeref; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", - str_yes_no(i915->display.params.enable_ips)); + str_yes_no(display->params.enable_ips)); - if (DISPLAY_VER(i915) >= 8) { + if (DISPLAY_VER(display) >= 8) { seq_puts(m, "Currently: unknown\n"); } else { - if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE) + if (intel_de_read(display, IPS_CTL) & IPS_ENABLE) seq_puts(m, "Currently: enabled\n"); else seq_puts(m, "Currently: disabled\n"); diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 9447f7229b60..17a1e3801a85 100644 --- 
a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -416,7 +416,8 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_plane_update_noarm(struct intel_plane *plane, +static void i9xx_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -444,7 +445,8 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane, } } -static void i9xx_plane_update_arm(struct intel_plane *plane, +static void i9xx_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -507,7 +509,8 @@ static void i9xx_plane_update_arm(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } -static void i830_plane_update_arm(struct intel_plane *plane, +static void i830_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -517,11 +520,12 @@ static void i830_plane_update_arm(struct intel_plane *plane, * Additional breakage on i830 causes register reads to return * the last latched value instead of the last written value [ALM026]. */ - i9xx_plane_update_noarm(plane, crtc_state, plane_state); - i9xx_plane_update_arm(plane, crtc_state, plane_state); + i9xx_plane_update_noarm(dsb, plane, crtc_state, plane_state); + i9xx_plane_update_arm(dsb, plane, crtc_state, plane_state); } -static void i9xx_plane_disable_arm(struct intel_plane *plane, +static void i9xx_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -549,7 +553,8 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane, } static void -g4x_primary_async_flip(struct intel_plane *plane, +g4x_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -569,7 +574,8 @@ g4x_primary_async_flip(struct intel_plane *plane, } static void -vlv_primary_async_flip(struct intel_plane *plane, +vlv_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 15cda57fbc91..e3b13886177a 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -7,13 +7,23 @@ #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" +#include "intel_bo.h" #include "intel_display.h" #include "intel_display_trace.h" +#include "intel_fb.h" #include "intel_mchbar_regs.h" #include "intel_wm.h" #include "skl_watermark.h" #include "vlv_sideband.h" +struct intel_watermark_params { + u16 fifo_size; + u16 max_wm; + u8 default_wm; + u8 guard_size; + u8 cacheline_size; +}; + /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; @@ -136,6 +146,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { + struct intel_display *display = &dev_priv->display; bool was_enabled; u32 
val; @@ -177,7 +188,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl return false; } - trace_intel_memory_cxsr(dev_priv, was_enabled, enable); + trace_intel_memory_cxsr(display, was_enabled, enable); drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", str_enabled_disabled(enable), @@ -695,6 +706,76 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) } } +static bool i9xx_wm_need_update(const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + /* Update watermarks on tiling or size changes. */ + if (old_plane_state->uapi.visible != new_plane_state->uapi.visible) + return true; + + if (!old_plane_state->hw.fb || !new_plane_state->hw.fb) + return false; + + if (old_plane_state->hw.fb->modifier != new_plane_state->hw.fb->modifier || + old_plane_state->hw.rotation != new_plane_state->hw.rotation || + drm_rect_width(&old_plane_state->uapi.src) != drm_rect_width(&new_plane_state->uapi.src) || + drm_rect_height(&old_plane_state->uapi.src) != drm_rect_height(&new_plane_state->uapi.src) || + drm_rect_width(&old_plane_state->uapi.dst) != drm_rect_width(&new_plane_state->uapi.dst) || + drm_rect_height(&old_plane_state->uapi.dst) != drm_rect_height(&new_plane_state->uapi.dst)) + return true; + + return false; +} + +static void i9xx_wm_compute(struct intel_crtc_state *new_crtc_state, + const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + bool turn_off, turn_on, visible, was_visible, mode_changed; + + mode_changed = intel_crtc_needs_modeset(new_crtc_state); + was_visible = old_plane_state->uapi.visible; + visible = new_plane_state->uapi.visible; + + if (!was_visible && !visible) + return; + + turn_off = was_visible && (!visible || mode_changed); + turn_on = visible && (!was_visible || mode_changed); + + /* FIXME nuke when all wm code is atomic */ + if (turn_on) { + new_crtc_state->update_wm_pre = true; + } else if (turn_off) { + new_crtc_state->update_wm_post = true; + } else if (i9xx_wm_need_update(old_plane_state, new_plane_state)) { + /* FIXME bollocks */ + new_crtc_state->update_wm_pre = true; + new_crtc_state->update_wm_post = true; + } +} + +static int i9xx_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + + i9xx_wm_compute(new_crtc_state, old_plane_state, new_plane_state); + } + + return 0; +} + /* * Documentation says: * "If the line size is small, the TLB fetches can get in the way of the @@ -715,10 +796,11 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) static void g4x_write_wm_values(struct drm_i915_private *dev_priv, const struct g4x_wm_values *wm) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), FW_WM(wm->sr.plane, SR) | @@ -747,10 +829,11 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, static void vlv_write_wm_values(struct drm_i915_private *dev_priv, const struct 
vlv_wm_values *wm) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | @@ -1276,6 +1359,22 @@ out: return 0; } +static int g4x_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = g4x_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = g4x_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + static void g4x_merge_wm(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { @@ -1902,6 +2001,22 @@ out: return 0; } +static int vlv_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = vlv_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = vlv_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { @@ -2088,12 +2203,13 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, enum i9xx_plane_id i9xx_plane) { + struct intel_display *display = &i915->display; struct intel_plane *plane; for_each_intel_plane(&i915->drm, plane) { if (plane->id == PLANE_PRIMARY && plane->i9xx_plane == i9xx_plane) - return intel_crtc_for_pipe(i915, plane->pipe); + return intel_crtc_for_pipe(display, plane->pipe); } return NULL; @@ -2172,12 +2288,12 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) crtc = single_enabled_crtc(dev_priv); if (IS_I915GM(dev_priv) && crtc) { - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; - obj = intel_fb_obj(crtc->base.primary->state->fb); + obj = intel_fb_bo(crtc->base.primary->state->fb); /* self-refresh seems busted with untiled */ - if (!i915_gem_object_is_tiled(obj)) + if (!intel_bo_is_tiled(obj)) crtc = NULL; } @@ -2878,8 +2994,9 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; - const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; + struct intel_pipe_wm *intermediate = &new_crtc_state->wm.ilk.intermediate; + const struct intel_pipe_wm *optimal = &new_crtc_state->wm.ilk.optimal; + const struct intel_pipe_wm *active = &old_crtc_state->wm.ilk.optimal; int level; /* @@ -2887,25 +3004,29 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, * currently active watermarks to get values that are safe both before * and after the vblank. 
*/ - *a = new_crtc_state->wm.ilk.optimal; + *intermediate = *optimal; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state) || state->skip_intermediate_wm) return 0; - a->pipe_enabled |= b->pipe_enabled; - a->sprites_enabled |= b->sprites_enabled; - a->sprites_scaled |= b->sprites_scaled; + intermediate->pipe_enabled |= active->pipe_enabled; + intermediate->sprites_enabled |= active->sprites_enabled; + intermediate->sprites_scaled |= active->sprites_scaled; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { - struct intel_wm_level *a_wm = &a->wm[level]; - const struct intel_wm_level *b_wm = &b->wm[level]; - - a_wm->enable &= b_wm->enable; - a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); - a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); - a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); - a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); + struct intel_wm_level *intermediate_wm = &intermediate->wm[level]; + const struct intel_wm_level *active_wm = &active->wm[level]; + + intermediate_wm->enable &= active_wm->enable; + intermediate_wm->pri_val = max(intermediate_wm->pri_val, + active_wm->pri_val); + intermediate_wm->spr_val = max(intermediate_wm->spr_val, + active_wm->spr_val); + intermediate_wm->cur_val = max(intermediate_wm->cur_val, + active_wm->cur_val); + intermediate_wm->fbc_val = max(intermediate_wm->fbc_val, + active_wm->fbc_val); } /* @@ -2914,19 +3035,35 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, * there's no safe way to transition from the old state to * the new state, so we need to fail the atomic transaction. */ - if (!ilk_validate_pipe_wm(dev_priv, a)) + if (!ilk_validate_pipe_wm(dev_priv, intermediate)) return -EINVAL; /* * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. */ - if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) new_crtc_state->wm.need_postvbl_update = true; return 0; } +static int ilk_compute_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + int ret; + + ret = ilk_compute_pipe_wm(state, crtc); + if (ret) + return ret; + + ret = ilk_compute_intermediate_wm(state, crtc); + if (ret) + return ret; + + return 0; +} + /* * Merge the watermarks from all active pipes for a specific level. 
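
Renaming a/b to intermediate/optimal/active makes the safety rule in this hunk explicit: an intermediate level is usable only if both the outgoing and incoming configurations allow it, and each value must cover the larger of the two demands, so the hardware is never underprovisioned on either side of the vblank. A self-contained sketch of that merge, with a simplified stand-in for intel_wm_level:

#include <stdbool.h>
#include <stdio.h>

#define MAX_WM(a, b) ((a) > (b) ? (a) : (b))

struct wm_level {
	bool enable;
	int pri_val, spr_val, cur_val, fbc_val;
};

static void merge_intermediate(struct wm_level *intermediate,
			       const struct wm_level *active)
{
	/* usable only if both the old and new configs can use this level */
	intermediate->enable &= active->enable;
	/* each watermark must cover the larger of the two demands */
	intermediate->pri_val = MAX_WM(intermediate->pri_val, active->pri_val);
	intermediate->spr_val = MAX_WM(intermediate->spr_val, active->spr_val);
	intermediate->cur_val = MAX_WM(intermediate->cur_val, active->cur_val);
	intermediate->fbc_val = MAX_WM(intermediate->fbc_val, active->fbc_val);
}

int main(void)
{
	struct wm_level intermediate = { true, 8, 4, 2, 1 };	/* new optimal */
	const struct wm_level active = { true, 12, 3, 5, 1 };	/* still on screen */

	merge_intermediate(&intermediate, &active);
	printf("pri=%d spr=%d cur=%d fbc=%d\n",
	       intermediate.pri_val, intermediate.spr_val,
	       intermediate.cur_val, intermediate.fbc_val);
	return 0;	/* prints pri=12 spr=4 cur=5 fbc=1 */
}
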
*/ @@ -3265,7 +3402,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, dev_priv->display.wm.hw = *results; } -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) +bool ilk_disable_cxsr(struct drm_i915_private *dev_priv) { return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } @@ -3716,6 +3853,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -3723,7 +3861,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(display, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -3871,6 +4009,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -3878,7 +4017,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(display, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -3971,16 +4110,14 @@ static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) } static const struct intel_wm_funcs ilk_wm_funcs = { - .compute_pipe_wm = ilk_compute_pipe_wm, - .compute_intermediate_wm = ilk_compute_intermediate_wm, + .compute_watermarks = ilk_compute_watermarks, .initial_watermarks = ilk_initial_watermarks, .optimize_watermarks = ilk_optimize_watermarks, .get_hw_state = ilk_wm_get_hw_state, }; static const struct intel_wm_funcs vlv_wm_funcs = { - .compute_pipe_wm = vlv_compute_pipe_wm, - .compute_intermediate_wm = vlv_compute_intermediate_wm, + .compute_watermarks = vlv_compute_watermarks, .initial_watermarks = vlv_initial_watermarks, .optimize_watermarks = vlv_optimize_watermarks, .atomic_update_watermarks = vlv_atomic_update_fifo, @@ -3988,26 +4125,29 @@ static const struct intel_wm_funcs vlv_wm_funcs = { }; static const struct intel_wm_funcs g4x_wm_funcs = { - .compute_pipe_wm = g4x_compute_pipe_wm, - .compute_intermediate_wm = g4x_compute_intermediate_wm, + .compute_watermarks = g4x_compute_watermarks, .initial_watermarks = g4x_initial_watermarks, .optimize_watermarks = g4x_optimize_watermarks, .get_hw_state = g4x_wm_get_hw_state_and_sanitize, }; static const struct intel_wm_funcs pnv_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = pnv_update_wm, }; static const struct intel_wm_funcs i965_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i965_update_wm, }; static const struct intel_wm_funcs i9xx_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i9xx_update_wm, }; static const struct intel_wm_funcs i845_wm_funcs = { + .compute_watermarks = i9xx_compute_watermarks, .update_wm = i845_update_wm, }; diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h index de0920730ab2..06ac37c6c94b 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.h +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -13,12 +13,12 @@ struct 
intel_crtc_state; struct intel_plane_state; #ifdef I915 -bool ilk_disable_lp_wm(struct drm_i915_private *i915); +bool ilk_disable_cxsr(struct drm_i915_private *i915); void ilk_wm_sanitize(struct drm_i915_private *i915); bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); void i9xx_wm_init(struct drm_i915_private *i915); #else -static inline bool ilk_disable_lp_wm(struct drm_i915_private *i915) +static inline bool ilk_disable_cxsr(struct drm_i915_private *i915) { return false; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 293efc1f841d..8a49f499e3fb 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -29,6 +29,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fixed.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> #include "i915_reg.h" #include "icl_dsi.h" @@ -45,43 +46,44 @@ #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" -static int header_credits_available(struct drm_i915_private *dev_priv, +static int header_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } -static int payload_credits_available(struct drm_i915_private *dev_priv, +static int payload_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; } -static bool wait_for_header_credits(struct drm_i915_private *dev_priv, +static bool wait_for_header_credits(struct intel_display *display, enum transcoder dsi_trans, int hdr_credit) { - if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(header_credits_available(display, dsi_trans) >= hdr_credit, 100)) { - drm_err(&dev_priv->drm, "DSI header credits not released\n"); + drm_err(display->drm, "DSI header credits not released\n"); return false; } return true; } -static bool wait_for_payload_credits(struct drm_i915_private *dev_priv, +static bool wait_for_payload_credits(struct intel_display *display, enum transcoder dsi_trans, int payld_credit) { - if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(payload_credits_available(display, dsi_trans) >= payld_credit, 100)) { - drm_err(&dev_priv->drm, "DSI payload credits not released\n"); + drm_err(display->drm, "DSI payload credits not released\n"); return false; } @@ -98,7 +100,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port) static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; @@ -108,8 +110,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for header/payload credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, 
MAX_HEADER_CREDIT); - wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); + wait_for_payload_credits(display, dsi_trans, MAX_PLOAD_CREDIT); } /* send nop DCS command */ @@ -119,22 +121,22 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) dsi->channel = 0; ret = mipi_dsi_dcs_nop(dsi); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error sending DCS NOP command\n"); } /* wait for header credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); } /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LPTX_IN_PROGRESS), 20)) - drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); + drm_err(display->drm, "LPTX bit not cleared\n"); } } @@ -142,7 +144,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, const struct mipi_dsi_packet *packet) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); const u8 *data = packet->payload; u32 len = packet->payload_length; @@ -150,20 +152,20 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, /* payload queue can accept *256 bytes*, check limit */ if (len > MAX_PLOAD_CREDIT * 4) { - drm_err(&i915->drm, "payload size exceeds max queue limit\n"); + drm_err(display->drm, "payload size exceeds max queue limit\n"); return -EINVAL; } for (i = 0; i < len; i += 4) { u32 tmp = 0; - if (!wait_for_payload_credits(i915, dsi_trans, 1)) + if (!wait_for_payload_credits(display, dsi_trans, 1)) return -EBUSY; for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; - intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp); + intel_de_write(display, DSI_CMD_TXPYLD(dsi_trans), tmp); } return 0; @@ -174,14 +176,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, bool enable_lpdt) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); u32 tmp; - if (!wait_for_header_credits(dev_priv, dsi_trans, 1)) + if (!wait_for_header_credits(display, dsi_trans, 1)) return -EBUSY; - tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_TXHDR(dsi_trans)); if (packet->payload) tmp |= PAYLOAD_PRESENT; @@ -200,15 +202,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT); tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT); tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT); - intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); + intel_de_write(display, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = 
to_intel_display(crtc_state); u32 mode_flags; enum port port; @@ -226,12 +227,13 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) else return; - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); + intel_de_rmw(display, DSI_CMD_FRMCTL(port), 0, + DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp, mask, val; @@ -245,31 +247,31 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | RTERM_SELECT(0x6); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), mask, val); mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK; val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | RCOMP_SCALAR(0x98); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), mask, val); mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK; val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | CURSOR_COEFF(0x3f); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), mask, val); /* Bspec: must not use GRP register for write */ for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), mask, val); } } @@ -277,13 +279,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; /* FIXME: Move all DSS handling to intel_vdsc.c */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); @@ -293,7 +295,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dss_ctl2_reg = DSS_CTL2; } - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); + dss_ctl1 = intel_de_read(display, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ -308,19 +310,19 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DL buffer depth exceed max value\n"); dss_ctl1 &= 
~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, + intel_de_rmw(display, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); + intel_de_write(display, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ @@ -341,6 +343,7 @@ static int afe_clk(struct intel_encoder *encoder, static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -360,33 +363,34 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DSI_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DSI_ESC_CLK_DIV(port)); } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DPHY_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port)); } if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8), + intel_de_write(display, ADL_MIPIO_DW(port, 8), esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY); - intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8)); + intel_de_posting_read(display, ADL_MIPIO_DW(port, 8)); } } } -static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, - struct intel_dsi *intel_dsi) +static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi) { + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); + drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]); intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? 
@@ -397,15 +401,15 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), 0, COMBO_PHY_MODE_DSI); - get_dsi_io_power_domains(dev_priv, intel_dsi); + get_dsi_io_power_domains(intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) @@ -421,6 +425,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; @@ -429,32 +434,33 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), + LOADGEN_SELECT, 0); for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) || - (DISPLAY_VER(dev_priv) >= 12)) { - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + (DISPLAY_VER(display) >= 12)) { + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); - tmp = intel_de_read(dev_priv, + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), + intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), tmp); } } @@ -463,17 +469,17 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); + intel_de_write(display, 
ICL_PORT_PCS_DW1_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* @@ -482,14 +488,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * as part of lane phy sequence configuration */ for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); + intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0, + SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ @@ -497,26 +504,26 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); + intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); - if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 500)) - drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", + drm_err(display->drm, "DDI port:%c buffer idle\n", port_name(port)); } } @@ -525,6 +532,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -532,12 +540,12 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), + intel_de_write(display, DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* Program DPHY data lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), + intel_de_write(display, DPHY_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* @@ -546,10 +554,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, * a value '0' inside TA_PARAM_REGISTERS otherwise * leave all fields at HW default values. 
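
The recurring change throughout this file is mechanical: helpers stop carrying struct drm_i915_private, derive struct intel_display from the object they already have via to_intel_display(), and route logging and register access through the display pointer. A stand-in sketch of the shape of that conversion (simplified types, not the driver's real structs or macros):

struct drm_device { int unused; };

struct intel_display {
	struct drm_device *drm;		/* for drm_err()/drm_dbg_kms() style logging */
	int ver;			/* what DISPLAY_VER() would report */
};

struct intel_encoder {
	struct intel_display *display;
};

/* stand-in for the driver's polymorphic to_intel_display() */
static struct intel_display *to_intel_display_sketch(struct intel_encoder *encoder)
{
	return encoder->display;
}

static void configure_encoder(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display_sketch(encoder);

	if (display->ver >= 12) {
		/* version checks and register helpers are now display-scoped */
	}
	(void)display->drm;
}

The payoff is that display-only code paths no longer need the full device structure, which is what allows prototypes like header_credits_available() above to lose their dev_priv parameter.
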
*/ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), + intel_de_rmw(display, DPHY_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -557,7 +565,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), + intel_de_rmw(display, ICL_DPHY_CHKN(phy), 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } @@ -566,30 +574,30 @@ static void gen11_dsi_setup_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + intel_de_rmw(display, ICL_DSI_T_INIT_MASTER(port), DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), + intel_de_write(display, DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), + intel_de_write(display, DSI_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* shadow register inside display core */ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + intel_de_rmw(display, DSI_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -599,45 +607,45 @@ gen11_dsi_setup_timings(struct intel_encoder *encoder, static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static bool 
gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); bool clock_enabled = false; enum phy phy; u32 tmp; - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy))) @@ -650,36 +658,36 @@ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; - mutex_lock(&dev_priv->display.dpll.lock); + mutex_lock(&display->dpll.lock); - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + val = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); - intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); + intel_de_posting_read(display, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = crtc->pipe; @@ -689,7 +697,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); if (intel_dsi->eotp_pkt) tmp &= ~EOTP_DISABLED; @@ -745,7 +753,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, } } - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (is_vid_mode(intel_dsi)) tmp |= BLANKING_PACKET_ENABLE; } @@ -778,15 +786,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, tmp |= TE_SOURCE_GPIO; } - intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); } /* enable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), 0, PORT_SYNC_MODE_ENABLE); } @@ -798,8 +806,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* select data lane width */ - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + tmp = intel_de_read(display, + 
TRANS_DDI_FUNC_CTL(display, dsi_trans)); tmp &= ~DDI_PORT_WIDTH_MASK; tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); @@ -825,16 +833,16 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* enable DDI buffer */ tmp |= TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), tmp); + intel_de_write(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), tmp); } /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) & LINK_READY), 2500)) - drm_err(&dev_priv->drm, "DSI link not ready\n"); + drm_err(display->drm, "DSI link not ready\n"); } } @@ -842,7 +850,7 @@ static void gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -909,17 +917,17 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* minimum hactive as per bspec: 256 pixels */ if (adjusted_mode->crtc_hdisplay < 256) - drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n"); + drm_err(display->drm, "hactive is less then 256 pixels\n"); /* if RGB666 format, then hactive must be multiple of 4 pixels */ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hactive pixels are not multiple of 4\n"); /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_HTOTAL(display, dsi_trans), HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } @@ -928,12 +936,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ if (hsync_size < 16) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hsync size < 16 pixels\n"); } if (hback_porch < 16) - drm_err(&dev_priv->drm, "hback porch < 16 pixels\n"); + drm_err(display->drm, "hback porch < 16 pixels\n"); if (intel_dsi->dual_link) { hsync_start /= 2; @@ -942,8 +950,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_HSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_HSYNC(display, dsi_trans), HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } @@ -957,22 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
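
The horizontal-timing checks in this hunk only log errors and carry on. A hedged, self-contained restatement of those bspec limits as a validation helper — the function name, the bool return, and the sync-pulse flag are this sketch's inventions:

#include <stdbool.h>
#include <stdio.h>

enum dsi_pixel_format { FMT_RGB888, FMT_RGB666 };

static bool dsi_htimings_valid(int hactive, int hsync_size, int hback_porch,
			       enum dsi_pixel_format fmt, bool sync_pulse_mode)
{
	bool ok = true;

	/* minimum hactive as per bspec: 256 pixels */
	if (hactive < 256) {
		fprintf(stderr, "hactive is less than 256 pixels\n");
		ok = false;
	}

	/* RGB666 requires hactive to be a multiple of 4 pixels */
	if (fmt == FMT_RGB666 && hactive % 4 != 0) {
		fprintf(stderr, "hactive pixels are not a multiple of 4\n");
		ok = false;
	}

	/* hsync size should be at least 16 pixels in sync-pulse mode */
	if (sync_pulse_mode && hsync_size < 16) {
		fprintf(stderr, "hsync size < 16 pixels\n");
		ok = false;
	}

	if (hback_porch < 16) {
		fprintf(stderr, "hback porch < 16 pixels\n");
		ok = false;
	}

	return ok;
}

int main(void)
{
	/* a sync-pulse video-mode RGB666 panel; 1080 is a multiple of 4 */
	return dsi_htimings_valid(1080, 20, 32, FMT_RGB666, true) ? 0 : 1;
}
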
* For interlace mode: program required pixel minus 2 */ - intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_VTOTAL(display, dsi_trans), VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) - drm_err(&dev_priv->drm, "Invalid vsync_end value\n"); + drm_err(display->drm, "Invalid vsync_end value\n"); if (vsync_start < vactive) - drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); + drm_err(display->drm, "vsync_start less than vactive\n"); /* program TRANS_VSYNC register for video mode only */ if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNC(display, dsi_trans), VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } @@ -986,8 +994,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNCSHIFT(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNCSHIFT(display, dsi_trans), vsync_shift); } } @@ -998,11 +1006,11 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * FIXME get rid of these local hacks and do it right, * this will not handle eg. delayed vblank correctly. */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VBLANK(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VBLANK(display, dsi_trans), VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } @@ -1010,20 +1018,20 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), 0, + intel_de_rmw(display, TRANSCONF(display, dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 10)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI transcoder not enabled\n"); } } @@ -1031,7 +1039,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1055,21 +1063,21 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), + intel_de_rmw(display, DSI_HSTX_TO(dsi_trans), HSTX_TIMEOUT_VALUE_MASK, HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), 
+ intel_de_rmw(display, DSI_LPRX_HOST_TO(dsi_trans), LPRX_TIMEOUT_VALUE_MASK, LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), + intel_de_rmw(display, DSI_TA_TO(dsi_trans), TA_TIMEOUT_VALUE_MASK, TA_TIMEOUT_VALUE(ta_timeout)); } @@ -1078,7 +1086,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; @@ -1090,7 +1098,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B))) return; - tmp = intel_de_read(dev_priv, UTIL_PIN_CTL); + tmp = intel_de_read(display, UTIL_PIN_CTL); if (enable) { tmp |= UTIL_PIN_DIRECTION_INPUT; @@ -1098,7 +1106,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, } else { tmp &= ~UTIL_PIN_ENABLE; } - intel_de_write(dev_priv, UTIL_PIN_CTL, tmp); + intel_de_write(display, UTIL_PIN_CTL, tmp); } static void @@ -1136,7 +1144,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; @@ -1152,14 +1160,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) * FIXME: This uses the number of DW's currently in the payload * receive queue. This is probably not what we want here. */ - tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_RXCTL(dsi_trans)); tmp &= NUMBER_RX_PLOAD_DW_MASK; /* multiply "Number Rx Payload DW" by 4 to get max value */ tmp = tmp * 4; dsi = intel_dsi->dsi_hosts[port]->device; ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error setting max return pkt size%d\n", tmp); } @@ -1219,10 +1227,10 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B) + intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_KVMR_PIPE_A, enable ? 
IGNORE_KVMR_PIPE_A : 0); } @@ -1235,13 +1243,13 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, */ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - if (DISPLAY_VER(i915) == 13) { + if (DISPLAY_VER(display) == 13) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), + intel_de_rmw(display, TGL_DSI_CHKN_REG(port), TGL_DSI_CHKN_LSHS_GB_MASK, TGL_DSI_CHKN_LSHS_GB(4)); } @@ -1275,7 +1283,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1284,13 +1292,13 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), + intel_de_rmw(display, TRANSCONF(display, dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI trancoder not disabled\n"); } } @@ -1307,7 +1315,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1316,29 +1324,29 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), + intel_de_rmw(display, DSI_CMD_FRMCTL(port), DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)); + tmp = intel_de_read(display, DSI_LP_MSG(dsi_trans)); tmp |= LINK_ENTER_ULPS; tmp &= ~LINK_ULPS_TYPE_LP11; - intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp); + intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp); - if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LINK_IN_ULPS), 10)) - drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); + drm_err(display->drm, "DSI link not in ULPS\n"); } /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), TRANS_DDI_FUNC_ENABLE, 0); } @@ -1346,8 +1354,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - 
TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), PORT_SYNC_MODE_ENABLE, 0); } } @@ -1355,18 +1363,18 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) static void gen11_dsi_disable_port(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); + intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); - if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DDI port:%c buffer not idle\n", port_name(port)); } @@ -1375,6 +1383,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -1392,7 +1401,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), COMBO_PHY_MODE_DSI, 0); } @@ -1504,8 +1513,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans; u32 val; @@ -1514,7 +1522,7 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) else dsi_trans = TRANSCODER_DSI_0; - val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); } @@ -1557,7 +1565,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, static void gen11_dsi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *intel_crtc; enum pipe pipe; @@ -1568,9 +1576,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, pipe = intel_crtc->pipe; /* wa verify 1409054076:icl,jsl,ehl */ - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && - !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) - drm_dbg_kms(&dev_priv->drm, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B && + !(intel_de_read(display, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n", encoder->base.base.id, encoder->base.name); @@ -1579,9 +1587,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = 
to_intel_display(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10; + int dsc_max_bpc = DISPLAY_VER(display) >= 12 ? 12 : 10; bool use_dsc; int ret; @@ -1606,12 +1614,12 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, return ret; /* DSI specific sanity checks on the common code */ - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->vbr_enable); + drm_WARN_ON(display->drm, vdsc_cfg->simple_422); + drm_WARN_ON(display->drm, vdsc_cfg->pic_width % vdsc_cfg->slice_width); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->slice_height < 8); + drm_WARN_ON(display->drm, vdsc_cfg->pic_height % vdsc_cfg->slice_height); ret = drm_dsc_compute_rc_parameters(vdsc_cfg); @@ -1627,7 +1635,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = @@ -1661,7 +1669,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) - drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n"); + drm_dbg_kms(display->drm, "Attempting to use DSC failed\n"); pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; @@ -1679,15 +1687,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - get_dsi_io_power_domains(i915, - enc_to_intel_dsi(encoder)); + get_dsi_io_power_domains(enc_to_intel_dsi(encoder)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; @@ -1703,8 +1709,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: *pipe = PIPE_A; @@ -1719,11 +1725,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, *pipe = PIPE_D; break; default: - drm_err(&dev_priv->drm, "Invalid PIPE input\n"); + drm_err(display->drm, "Invalid PIPE input\n"); goto out; } - tmp = intel_de_read(dev_priv, TRANSCONF(dev_priv, dsi_trans)); + tmp = intel_de_read(display, TRANSCONF(display, dsi_trans)); ret = tmp & TRANSCONF_ENABLE; } out: @@ -1833,8 +1839,7 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { static void icl_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display 
*display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns; @@ -1858,7 +1863,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); if (prepare_cnt > ICL_PREPARE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n", prepare_cnt); prepare_cnt = ICL_PREPARE_CNT_MAX; } @@ -1867,7 +1872,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - ths_prepare_ns, tlpx_ns); if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "clk_zero_cnt out of range (%d)\n", clk_zero_cnt); clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } @@ -1875,7 +1880,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* trail cnt in escape clocks*/ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); if (trail_cnt > ICL_TRAIL_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n", trail_cnt); trail_cnt = ICL_TRAIL_CNT_MAX; } @@ -1883,7 +1888,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* tclk pre count in escape clocks */ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; } @@ -1892,7 +1897,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - ths_prepare_ns, tlpx_ns); if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n", hs_zero_cnt); hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; } @@ -1900,7 +1905,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* hs exit zero cnt in escape clocks */ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "exit_zero_cnt out of range (%d)\n", exit_zero_cnt); exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; @@ -1942,10 +1947,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector) fixed_mode->vdisplay); } -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { - struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *intel_connector; @@ -1973,7 +1977,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->devdata = devdata; /* register DSI encoder with DRM subsystem */ - drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs, + drm_encoder_init(display->drm, &encoder->base, + &gen11_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; @@ -1998,7 +2003,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->shutdown = intel_dsi_shutdown; /* register DSI connector with DRM subsystem */ - drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs, + drm_connector_init(display->drm, connector, + 
&gen11_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; @@ -2011,12 +2017,12 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL); - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { - drm_err(&dev_priv->drm, "DSI fixed mode info missing\n"); + drm_err(display->drm, "DSI fixed mode info missing\n"); goto err; } @@ -2029,10 +2035,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { @@ -2046,7 +2052,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - drm_dbg_kms(&dev_priv->drm, "no device found\n"); + drm_dbg_kms(display->drm, "no device found\n"); goto err; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h index 43fa7d72eeb1..099fc50e35b4 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.h +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -6,11 +6,11 @@ #ifndef __ICL_DSI_H__ #define __ICL_DSI_H__ -struct drm_i915_private; struct intel_bios_encoder_data; struct intel_crtc_state; +struct intel_display; -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata); void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index 186cf4833f71..55f3ae1e68c9 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -3,6 +3,8 @@ * Copyright 2024, Intel Corporation. 
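
The bl_ports/cabc_ports handling in icl_dsi_init() above warns when the VBT names ports the encoder does not actually drive, then masks the stray bits off rather than trusting the table. The same warn-and-clamp pattern in isolation, with stand-in names:

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int clamp_vbt_ports(unsigned int vbt_ports,
				    unsigned int encoder_ports,
				    const char *what)
{
	if (vbt_ports & ~encoder_ports) {
		/* the driver warns once, then trusts only its own port mask */
		fprintf(stderr, "VBT %s ports 0x%x outside encoder ports 0x%x\n",
			what, vbt_ports, encoder_ports);
		vbt_ports &= encoder_ports;
	}

	return vbt_ports;
}

int main(void)
{
	unsigned int encoder_ports = BIT(0);		/* port A only */
	unsigned int bl_ports = BIT(0) | BIT(1);	/* VBT claims A+B */

	bl_ports = clamp_vbt_ports(bl_ports, encoder_ports, "backlight");
	printf("clamped: 0x%x\n", bl_ports);		/* prints 0x1 */
	return 0;
}
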
*/ +#include <linux/debugfs.h> + #include "intel_alpm.h" #include "intel_crtc.h" #include "intel_de.h" @@ -330,7 +332,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines); intel_de_write(display, - PORT_ALPM_CTL(display, port), + PORT_ALPM_CTL(port), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE | PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) | PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | @@ -338,7 +340,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp, intel_dp->alpm_parameters.silence_period_sym_clocks)); intel_de_write(display, - PORT_ALPM_LFPS_CTL(display, port), + PORT_ALPM_LFPS_CTL(port), PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) | PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 12d6ed940751..03dc54c802d3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -266,7 +266,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_pipe = false; crtc_state->update_m_n = false; crtc_state->update_lrr = false; - crtc_state->disable_lp_wm = false; crtc_state->disable_cxsr = false; crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; @@ -277,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->fb_bits = 0; crtc_state->update_planes = 0; crtc_state->dsb_color_vblank = NULL; - crtc_state->dsb_color_commit = NULL; + crtc_state->dsb_commit = NULL; + crtc_state->use_dsb = false; return &crtc_state->uapi; } @@ -312,7 +312,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank); - drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit); + drm_WARN_ON(crtc->dev, crtc_state->dsb_commit); __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index e979786aa5cf..d89630b2d5c1 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,9 +35,10 @@ #include <linux/dma-resv.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_atomic_helper.h> #include "i915_config.h" #include "i9xx_plane_regs.h" @@ -391,28 +392,6 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, plane_state->uapi.visible = false; } -/* FIXME nuke when all wm code is atomic */ -static bool intel_wm_need_update(const struct intel_plane_state *cur, - struct intel_plane_state *new) -{ - /* Update watermarks on tiling or size changes. 
*/ - if (new->uapi.visible != cur->uapi.visible) - return true; - - if (!cur->hw.fb || !new->hw.fb) - return false; - - if (cur->hw.fb->modifier != new->hw.fb->modifier || - cur->hw.rotation != new->hw.rotation || - drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || - drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || - drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || - drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) - return true; - - return false; -} - static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state) { int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; @@ -492,6 +471,61 @@ static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state return old_ctl != new_ctl; } +static bool ilk_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state, + const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) +{ + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); + bool old_visible = old_plane_state->uapi.visible; + bool new_visible = new_plane_state->uapi.visible; + bool modeset, turn_on; + + if (plane->id == PLANE_CURSOR) + return false; + + modeset = intel_crtc_needs_modeset(new_crtc_state); + turn_on = new_visible && (!old_visible || modeset); + + /* + * ILK/SNB DVSACNTR/Sprite Enable + * IVB SPR_CTL/Sprite Enable + * "When in Self Refresh Big FIFO mode, a write to enable the + * plane will be internally buffered and delayed while Big FIFO + * mode is exiting." + * + * Which means that enabling the sprite can take an extra frame + * when we start in big FIFO mode (LP1+). Thus we need to drop + * down to LP0 and wait for vblank in order to make sure the + * sprite gets enabled on the next vblank after the register write. + * Doing otherwise would risk enabling the sprite one frame after + * we've already signalled flip completion. We can resume LP1+ + * once the sprite has been enabled. + * + * With experimental results seems this is needed also for primary + * plane, not only sprite plane. + */ + if (turn_on) + return true; + + /* + * WaCxSRDisabledForSpriteScaling:ivb + * IVB SPR_SCALE/Scaling Enable + * "Low Power watermarks must be disabled for at least one + * frame before enabling sprite scaling, and kept disabled + * until sprite scaling is disabled." + * + * ILK/SNB DVSASCALE/Scaling Enable + * "When in Self Refresh Big FIFO mode, scaling enable will be + * masked off while Big FIFO mode is exiting." + * + * Despite the w/a only being listed for IVB we assume that + * the ILK/SNB note has similar ramifications, hence we apply + * the w/a on all three platforms. 
+ */ + return !intel_plane_is_scaled(old_plane_state) && + intel_plane_is_scaled(new_plane_state); +} + static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -546,20 +580,6 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr was_visible, visible, turn_off, turn_on, mode_changed); - if (turn_on) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_pre = true; - } else if (turn_off) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_post = true; - } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - /* FIXME bollocks */ - new_crtc_state->update_wm_pre = true; - new_crtc_state->update_wm_post = true; - } - } - if (visible || was_visible) new_crtc_state->fb_bits |= plane->frontbuffer_bit; @@ -567,45 +587,9 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) new_crtc_state->disable_cxsr = true; - /* - * ILK/SNB DVSACNTR/Sprite Enable - * IVB SPR_CTL/Sprite Enable - * "When in Self Refresh Big FIFO mode, a write to enable the - * plane will be internally buffered and delayed while Big FIFO - * mode is exiting." - * - * Which means that enabling the sprite can take an extra frame - * when we start in big FIFO mode (LP1+). Thus we need to drop - * down to LP0 and wait for vblank in order to make sure the - * sprite gets enabled on the next vblank after the register write. - * Doing otherwise would risk enabling the sprite one frame after - * we've already signalled flip completion. We can resume LP1+ - * once the sprite has been enabled. - * - * - * WaCxSRDisabledForSpriteScaling:ivb - * IVB SPR_SCALE/Scaling Enable - * "Low Power watermarks must be disabled for at least one - * frame before enabling sprite scaling, and kept disabled - * until sprite scaling is disabled." - * - * ILK/SNB DVSASCALE/Scaling Enable - * "When in Self Refresh Big FIFO mode, scaling enable will be - * masked off while Big FIFO mode is exiting." - * - * Despite the w/a only being listed for IVB we assume that - * the ILK/SNB note has similar ramifications, hence we apply - * the w/a on all three platforms. - * - * With experimental results seems this is needed also for primary - * plane, not only sprite plane. 
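
Pulling the big-FIFO rules out into ilk_must_disable_cxsr() above makes the two triggers easy to audit: a plane being turned on (directly or via a modeset), and a plane transitioning into scaled operation. A self-contained sketch of the same decision with the plane state reduced to booleans (hypothetical types, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct plane_snapshot {
	bool visible;
	bool scaled;
	bool is_cursor;
};

/* Mirror of the two rules: enabling a plane, or starting to scale one,
 * requires dropping out of LP1+ (CxSR) for a frame. */
static bool must_disable_cxsr(const struct plane_snapshot *old,
			      const struct plane_snapshot *new, bool modeset)
{
	bool turn_on = new->visible && (!old->visible || modeset);

	if (new->is_cursor)
		return false;
	if (turn_on)
		return true;
	return !old->scaled && new->scaled;
}

int main(void)
{
	struct plane_snapshot old = { .visible = true, .scaled = false };
	struct plane_snapshot new = { .visible = true, .scaled = true };

	printf("disable CxSR: %d\n", must_disable_cxsr(&old, &new, false)); /* 1 */
	return 0;
}
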
- */ - if (plane->id != PLANE_CURSOR && - (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || - IS_IVYBRIDGE(dev_priv)) && - (turn_on || (!intel_plane_is_scaled(old_plane_state) && - intel_plane_is_scaled(new_plane_state)))) - new_crtc_state->disable_lp_wm = true; + if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) && + ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) + new_crtc_state->disable_cxsr = true; if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { new_crtc_state->do_async_flip = true; @@ -710,13 +694,13 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_primary_crtc_plane_state; - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -790,7 +774,8 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -799,10 +784,11 @@ void intel_plane_update_noarm(struct intel_plane *plane, trace_intel_plane_update_noarm(plane, crtc); if (plane->update_noarm) - plane->update_noarm(plane, crtc_state, plane_state); + plane->update_noarm(dsb, plane, crtc_state, plane_state); } -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -810,34 +796,37 @@ void intel_plane_async_flip(struct intel_plane *plane, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_async_flip(plane, crtc, async_flip); - plane->async_flip(plane, crtc_state, plane_state, async_flip); + plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip); } -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->do_async_flip && plane->async_flip) { - intel_plane_async_flip(plane, crtc_state, plane_state, true); + intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true); return; } trace_intel_plane_update_arm(plane, crtc); - plane->update_arm(plane, crtc_state, plane_state); + plane->update_arm(dsb, plane, crtc_state, plane_state); } -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_disable_arm(plane, crtc); - plane->disable_arm(plane, crtc_state); + 
plane->disable_arm(dsb, plane, crtc_state); } -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -862,11 +851,13 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, /* TODO: for mailbox updates this should be skipped */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + intel_plane_update_noarm(dsb, plane, + new_crtc_state, new_plane_state); } } -static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, +static void skl_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = @@ -893,13 +884,14 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, +static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -919,21 +911,22 @@ static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, * would have to be called here as well. */ if (new_plane_state->uapi.visible) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (DISPLAY_VER(i915) >= 9) - skl_crtc_planes_update_arm(state, crtc); + skl_crtc_planes_update_arm(dsb, state, crtc); else - i9xx_crtc_planes_update_arm(state, crtc); + i9xx_crtc_planes_update_arm(dsb, state, crtc); } int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, @@ -1031,6 +1024,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) */ hsub = 1; vsub = 1; + + /* Wa_16023981245 */ + if ((DISPLAY_VERx100(i915) == 2000 || + DISPLAY_VERx100(i915) == 3000) && + src_x % 2 != 0) + hsub = 2; } else { hsub = fb->format->hsub; vsub = fb->format->vsub; @@ -1114,8 +1113,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); - struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); + struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb); + struct drm_gem_object *old_obj = intel_fb_bo(old_plane_state->hw.fb); int ret; if (old_obj) { @@ -1135,7 +1134,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * can safely continue. 
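
The mechanical change running through all of these plane hooks is the new leading struct intel_dsb * parameter, which threads an optional display state buffer context through update/disable/async-flip. A sketch of the dispatch shape this enables, under the assumption (not visible in this hunk) that a NULL context means an immediate register write while a non-NULL one queues the write for hardware execution:

#include <stdio.h>

struct dsb_ctx { int queued; };

/* Hypothetical register write helper: with a context, queue the write;
 * without one, perform it directly. */
static void reg_write(struct dsb_ctx *dsb, unsigned int reg, unsigned int val)
{
	if (dsb) {
		dsb->queued++;
		printf("queued 0x%x <- 0x%x\n", reg, val);
	} else {
		printf("mmio   0x%x <- 0x%x\n", reg, val);
	}
}

int main(void)
{
	struct dsb_ctx dsb = { 0 };

	reg_write(NULL, 0x70180, 0x1);  /* direct path */
	reg_write(&dsb, 0x70180, 0x1);  /* buffered path */
	printf("%d write(s) queued\n", dsb.queued);
	return 0;
}
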
*/ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { - ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv, + ret = add_dma_resv_fences(old_obj->resv, &new_plane_state->uapi); if (ret < 0) return ret; @@ -1195,7 +1194,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, struct intel_atomic_state *state = to_intel_atomic_state(old_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); + struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb); if (!obj) return; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 6c4fe3596465..0f982f452ff3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -14,6 +14,7 @@ struct drm_rect; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_dsb; struct intel_plane; struct intel_plane_state; enum plane_id; @@ -32,26 +33,32 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index f5e7eefab2f1..32aa9ec1a204 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -982,12 +982,12 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) { struct intel_display *display = to_intel_display(kdev); struct drm_i915_private *i915 = to_i915(display->drm); - intel_wakeref_t ret; + intel_wakeref_t wakeref; /* Catch potential impedance mismatches before they occur! 
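
The rename from ret to wakeref above, together with the explicit casts added in the get_power()/put_power() pair, documents that the opaque unsigned long cookie crossing the audio component interface is really a wakeref in disguise, and the BUILD_BUG_ON is what catches a cookie that is too narrow. A user-space sketch of the same round-trip with stand-in types:

#include <stdio.h>

typedef unsigned long cookie_t;   /* what the component API carries */
struct wake { int id; };          /* stand-in for the wakeref */
typedef struct wake *wakeref_t;

static cookie_t get_power(wakeref_t w)
{
	/* mirror of the BUILD_BUG_ON: the cookie must be wide enough */
	_Static_assert(sizeof(cookie_t) >= sizeof(wakeref_t), "cookie too narrow");
	return (cookie_t)w;
}

static wakeref_t put_power(cookie_t cookie)
{
	return (wakeref_t)cookie;  /* recover the original handle */
}

int main(void)
{
	struct wake ref = { 42 };
	cookie_t c = get_power(&ref);

	printf("recovered id = %d\n", put_power(c)->id);
	return 0;
}
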
*/ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long)); - ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK); if (i915->display.audio.power_refcount++ == 0) { if (DISPLAY_VER(i915) >= 9) { @@ -1007,7 +1007,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) 0, AUD_PIN_BUF_ENABLE); } - return ret; + return (unsigned long)wakeref; } static void i915_audio_component_put_power(struct device *kdev, @@ -1015,13 +1015,14 @@ static void i915_audio_component_put_power(struct device *kdev, { struct intel_display *display = to_intel_display(kdev); struct drm_i915_private *i915 = to_i915(display->drm); + intel_wakeref_t wakeref = (intel_wakeref_t)cookie; /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--i915->display.audio.power_refcount == 0) if (IS_GEMINILAKE(i915)) glk_force_audio_cdclk(i915, false); - intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, cookie); + intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref); } static void i915_audio_component_codec_wake_override(struct device *kdev, diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9e05745d797d..3f81a726cc7d 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector) else props.power = BACKLIGHT_POWER_OFF; - name = kstrdup_const("intel_backlight", GFP_KERNEL); + name = kstrdup("intel_backlight", GFP_KERNEL); if (!name) return -ENOMEM; @@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector) * compatibility. Use unique names for subsequent backlight devices as a * fallback when the default name already exists. 
*/ - kfree_const(name); + kfree(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) @@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector) connector->base.base.id, connector->base.name, name); out: - kfree_const(name); + kfree(name); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index bed485374ab0..a4cdd82c4a75 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -25,6 +25,7 @@ * */ +#include <linux/debugfs.h> #include <linux/firmware.h> #include <drm/display/drm_dp_helper.h> @@ -32,12 +33,12 @@ #include <drm/drm_edid.h> #include <drm/drm_fixed.h> +#include "soc/intel_rom.h" + #include "i915_drv.h" -#include "i915_reg.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_gmbus.h" -#include "intel_uncore.h" #define _INTEL_BIOS_PRIVATE #include "intel_vbt_defs.h" @@ -1168,7 +1169,6 @@ static int intel_bios_ssc_frequency(struct intel_display *display, static void parse_general_features(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_features *general; general = bdb_find_section(display, BDB_GENERAL_FEATURES); @@ -1178,7 +1178,7 @@ parse_general_features(struct intel_display *display) display->vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ if (display->vbt.version >= 155 && - (HAS_DDI(display) || IS_VALLEYVIEW(i915))) + (HAS_DDI(display) || display->platform.valleyview)) display->vbt.int_crt_support = general->int_crt_support; display->vbt.lvds_use_ssc = general->enable_ssc; display->vbt.lvds_ssc_freq = @@ -1541,7 +1541,6 @@ static void parse_psr(struct intel_display *display, struct intel_panel *panel) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_psr *psr; const struct psr_table *psr_table; int panel_type = panel->vbt.panel_type; @@ -1566,7 +1565,7 @@ parse_psr(struct intel_display *display, * Old decimal value is wake up time in multiples of 100 us. 
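
Dropping kstrdup_const()/kfree_const() in the backlight hunks above is needed for correctness: once the default name can be replaced by a kasprintf() allocation, the buffer must always be ordinary kmalloc memory so a single kfree() path works. The naming strategy itself is try-the-default-then-uniquify; a sketch with invented names and a pretend registry:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend registry: the default name is already taken (-EEXIST-ish). */
static int register_backlight(const char *name)
{
	return strcmp(name, "intel_backlight") == 0 ? -1 : 0;
}

int main(void)
{
	char *name = strdup("intel_backlight");

	if (!name)
		return 1;
	if (register_backlight(name) < 0) {
		/* default exists: fall back to a unique, card-scoped name */
		free(name);
		if (asprintf(&name, "card%d-%s-backlight", 0, "eDP-1") < 0)
			return 1;
	}
	if (register_backlight(name) == 0)
		printf("registered as %s\n", name);
	free(name);
	return 0;
}
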
*/ if (display->vbt.version >= 205 && - (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) { + (DISPLAY_VER(display) >= 9 && !display->platform.broxton)) { switch (psr_table->tp1_wakeup_time) { case 0: panel->vbt.psr.tp1_wakeup_time_us = 500; @@ -1705,8 +1704,8 @@ parse_mipi_config(struct intel_display *display, return; } - drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n", - panel_type); + drm_dbg_kms(display->drm, "Found MIPI Config block, panel index = %d\n", + panel_type); /* * get hold of the correct configuration block and pps data as per @@ -2028,11 +2027,9 @@ static void icl_fixup_mipi_sequences(struct intel_display *display, static void fixup_mipi_sequences(struct intel_display *display, struct intel_panel *panel) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) >= 11) icl_fixup_mipi_sequences(display, panel); - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) vlv_fixup_mipi_sequences(display, panel); } @@ -2066,8 +2063,8 @@ parse_mipi_sequence(struct intel_display *display, return; } - drm_dbg(display->drm, "Found MIPI sequence block v%u\n", - sequence->version); + drm_dbg_kms(display->drm, "Found MIPI sequence block v%u\n", + sequence->version); seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size); if (!seq_data) @@ -2113,7 +2110,7 @@ parse_mipi_sequence(struct intel_display *display, fixup_mipi_sequences(display, panel); - drm_dbg(display->drm, "MIPI related VBT parsing complete\n"); + drm_dbg_kms(display->drm, "MIPI related VBT parsing complete\n"); return; err: @@ -2242,15 +2239,15 @@ static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin) const u8 *ddc_pin_map; int i, n_entries; - if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) { ddc_pin_map = adlp_ddc_pin_map; n_entries = ARRAY_SIZE(adlp_ddc_pin_map); - } else if (IS_ALDERLAKE_S(i915)) { + } else if (display->platform.alderlake_s) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { return vbt_pin; - } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { + } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) { @@ -2333,7 +2330,6 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo, static enum port dvo_port_to_port(struct intel_display *display, u8 dvo_port) { - struct drm_i915_private *i915 = to_i915(display->drm); /* * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. 
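
map_ddc_pin() above, like dvo_port_to_port() and map_aux_ch() later in this file, follows one shape: select a per-platform lookup table, bounds-check the VBT value against it, and fall back to an identity mapping when no table applies. A condensed sketch of that table-select-then-lookup pattern (tables and values invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum platform { PLAT_ADLS, PLAT_RKL, PLAT_OTHER };

static const unsigned char adls_map[] = { 0, 1, 2, 3, 9 };
static const unsigned char rkl_map[]  = { 0, 1, 2, 4 };

static unsigned char map_ddc_pin(enum platform p, unsigned char vbt_pin)
{
	const unsigned char *map = NULL;
	size_t n = 0;

	switch (p) {
	case PLAT_ADLS: map = adls_map; n = ARRAY_SIZE(adls_map); break;
	case PLAT_RKL:  map = rkl_map;  n = ARRAY_SIZE(rkl_map);  break;
	default:        return vbt_pin; /* no remapping needed */
	}

	if (vbt_pin < n && map[vbt_pin] != 0)
		return map[vbt_pin];

	fprintf(stderr, "ignoring alleged VBT DDC pin %u\n", vbt_pin);
	return 0;
}

int main(void)
{
	printf("mapped pin: %u\n", map_ddc_pin(PLAT_ADLS, 4)); /* 9 */
	return 0;
}
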
@@ -2390,12 +2386,12 @@ static enum port dvo_port_to_port(struct intel_display *display, ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, dvo_port); - else if (IS_ALDERLAKE_S(i915)) + else if (display->platform.alderlake_s) return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping), ARRAY_SIZE(adls_port_mapping[0]), adls_port_mapping, dvo_port); - else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) + else if (display->platform.dg1 || display->platform.rocketlake) return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), ARRAY_SIZE(rkl_port_mapping[0]), rkl_port_mapping, @@ -2518,7 +2514,6 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, enum port port) { struct intel_display *display = devdata->display; - struct drm_i915_private *i915 = to_i915(display->drm); if (!intel_bios_encoder_supports_dvi(devdata)) return; @@ -2528,7 +2523,7 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, * with a HSW VBT where the level shifter value goes * up to 11, whereas the BDW max is 9. */ - if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) { + if (display->platform.broadwell && devdata->child.hdmi_level_shifter_value > 9) { drm_dbg_kms(display->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", port_name(port), devdata->child.hdmi_level_shifter_value, 9); @@ -2617,14 +2612,13 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata static bool is_port_valid(struct intel_display *display, enum port port) { - struct drm_i915_private *i915 = to_i915(display->drm); /* * On some ICL SKUs port F is not present, but broken VBTs mark * the port as present. Only try to initialize port F for the * SKUs that may actually have it. */ - if (port == PORT_F && IS_ICELAKE(i915)) - return IS_ICL_WITH_PORT_F(i915); + if (port == PORT_F && display->platform.icelake) + return display->platform.icelake_port_f; return true; } @@ -2722,9 +2716,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) static bool has_ddi_port_info(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - return DISPLAY_VER(display) >= 5 || IS_G4X(i915); + return DISPLAY_VER(display) >= 5 || display->platform.g4x; } static void parse_ddi_ports(struct intel_display *display) @@ -2770,9 +2762,9 @@ static bool child_device_size_valid(struct intel_display *display, int size) expected_size = child_device_expected_size(display->vbt.version); if (expected_size < 0) { expected_size = sizeof(struct child_device_config); - drm_dbg(display->drm, - "Expected child device config size for VBT version %u not known; assuming %d\n", - display->vbt.version, expected_size); + drm_dbg_kms(display->drm, + "Expected child device config size for VBT version %u not known; assuming %d\n", + display->vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. 
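
Hoisting the signature into vbt_signature/vbt_signature_len above lets the validator and the ROM scan share one definition; the scan compares the four signature bytes as a single u32 at successive 4-byte offsets, and only then trusts the vbt_size field that follows the header. A stand-alone sketch of that scan over an in-memory buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const char vbt_signature[] = "$VBT";

/* Find "$VBT" at any 4-byte-aligned offset; returns -1 if absent. */
static long find_vbt(const uint8_t *rom, size_t size)
{
	uint32_t sig;

	memcpy(&sig, vbt_signature, sizeof(sig));
	for (size_t i = 0; i + sizeof(sig) <= size; i += 4) {
		uint32_t word;

		memcpy(&word, rom + i, sizeof(word));
		if (word == sig)
			return (long)i;
	}
	return -1;
}

int main(void)
{
	uint8_t rom[32] = { 0 };

	memcpy(rom + 8, "$VBT", 4);
	printf("VBT at offset %ld\n", find_vbt(rom, sizeof(rom))); /* 8 */
	return 0;
}
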
*/ @@ -2795,7 +2787,6 @@ static bool child_device_size_valid(struct intel_display *display, int size) static void parse_general_definitions(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct bdb_general_definitions *defs; struct intel_bios_encoder_data *devdata; const struct child_device_config *child; @@ -2820,7 +2811,7 @@ parse_general_definitions(struct intel_display *display) bus_pin = defs->crt_ddc_gmbus_pin; drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin); - if (intel_gmbus_is_valid_pin(i915, bus_pin)) + if (intel_gmbus_is_valid_pin(display, bus_pin)) display->vbt.crt_ddc_pin = bus_pin; if (!child_device_size_valid(display, defs->child_dev_size)) @@ -2906,7 +2897,7 @@ init_vbt_missing_defaults(struct intel_display *display) unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask; enum port port; - if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915)) + if (!HAS_DDI(display) && !display->platform.cherryview) return; for_each_port_masked(port, ports) { @@ -2963,6 +2954,9 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) return _vbt + vbt->bdb_offset; } +static const char vbt_signature[] = "$VBT"; +static const int vbt_signature_len = 4; + /** * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT * @display: display device @@ -2985,7 +2979,7 @@ bool intel_bios_is_valid_vbt(struct intel_display *display, return false; } - if (memcmp(vbt->signature, "$VBT", 4)) { + if (memcmp(vbt->signature, vbt_signature, vbt_signature_len)) { drm_dbg_kms(display->drm, "VBT invalid signature\n"); return false; } @@ -3052,131 +3046,59 @@ static struct vbt_header *firmware_get_vbt(struct intel_display *display, return vbt; } -static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) -{ - intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset); - - return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER); -} - -static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display, - size_t *size) -{ - struct drm_i915_private *i915 = to_i915(display->drm); - u32 count, data, found, store = 0; - u32 static_region, oprom_offset; - u32 oprom_size = 0x200000; - u16 vbt_size; - u32 *vbt; - - static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS); - static_region &= OPTIONROM_SPI_REGIONID_MASK; - intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region); - - oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET); - oprom_offset &= OROM_OFFSET_MASK; - - for (count = 0; count < oprom_size; count += 4) { - data = intel_spi_read(&i915->uncore, oprom_offset + count); - if (data == *((const u32 *)"$VBT")) { - found = oprom_offset + count; - break; - } - } - - if (count >= oprom_size) - goto err_not_found; - - /* Get VBT size and allocate space for the VBT */ - vbt_size = intel_spi_read(&i915->uncore, - found + offsetof(struct vbt_header, vbt_size)); - vbt_size &= 0xffff; - - vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); - if (!vbt) - goto err_not_found; - - for (count = 0; count < vbt_size; count += 4) - *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); - - if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) - goto err_free_vbt; - - drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n"); - - if (size) - *size = vbt_size; - - return (struct vbt_header *)vbt; - -err_free_vbt: - kfree(vbt); -err_not_found: - return NULL; -} - static struct vbt_header *oprom_get_vbt(struct intel_display *display, - size_t *sizep) + struct intel_rom *rom, + size_t 
*size, const char *type) { - struct pci_dev *pdev = to_pci_dev(display->drm->dev); - void __iomem *p = NULL, *oprom; struct vbt_header *vbt; - u16 vbt_size; - size_t i, size; + size_t vbt_size; + loff_t offset; - oprom = pci_map_rom(pdev, &size); - if (!oprom) + if (!rom) return NULL; - /* Scour memory looking for the VBT signature. */ - for (i = 0; i + 4 < size; i += 4) { - if (ioread32(oprom + i) != *((const u32 *)"$VBT")) - continue; + BUILD_BUG_ON(vbt_signature_len != sizeof(vbt_signature) - 1); + BUILD_BUG_ON(vbt_signature_len != sizeof(u32)); - p = oprom + i; - size -= i; - break; - } + offset = intel_rom_find(rom, *(const u32 *)vbt_signature); + if (offset < 0) + goto err_free_rom; - if (!p) - goto err_unmap_oprom; - - if (sizeof(struct vbt_header) > size) { - drm_dbg(display->drm, "VBT header incomplete\n"); - goto err_unmap_oprom; + if (sizeof(struct vbt_header) > intel_rom_size(rom) - offset) { + drm_dbg_kms(display->drm, "VBT header incomplete\n"); + goto err_free_rom; } - vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); - if (vbt_size > size) { - drm_dbg(display->drm, - "VBT incomplete (vbt_size overflows)\n"); - goto err_unmap_oprom; + BUILD_BUG_ON(sizeof(vbt->vbt_size) != sizeof(u16)); + + vbt_size = intel_rom_read16(rom, offset + offsetof(struct vbt_header, vbt_size)); + if (vbt_size > intel_rom_size(rom) - offset) { + drm_dbg_kms(display->drm, "VBT incomplete (vbt_size overflows)\n"); + goto err_free_rom; } - /* The rest will be validated by intel_bios_is_valid_vbt() */ - vbt = kmalloc(vbt_size, GFP_KERNEL); + vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); if (!vbt) - goto err_unmap_oprom; + goto err_free_rom; - memcpy_fromio(vbt, p, vbt_size); + intel_rom_read_block(rom, vbt, offset, vbt_size); if (!intel_bios_is_valid_vbt(display, vbt, vbt_size)) goto err_free_vbt; - pci_unmap_rom(pdev, oprom); + drm_dbg_kms(display->drm, "Found valid VBT in %s\n", type); - if (sizep) - *sizep = vbt_size; + if (size) + *size = vbt_size; - drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n"); + intel_rom_free(rom); return vbt; err_free_vbt: kfree(vbt); -err_unmap_oprom: - pci_unmap_rom(pdev, oprom); - +err_free_rom: + intel_rom_free(rom); return NULL; } @@ -3198,11 +3120,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display */ if (!vbt && IS_DGFX(i915)) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = spi_oprom_get_vbt(display, sizep); + vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash"); if (!vbt) with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vbt = oprom_get_vbt(display, sizep); + vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM"); return vbt; } @@ -3406,7 +3328,6 @@ bool intel_bios_is_tv_present(struct intel_display *display) */ bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin) { - struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_bios_encoder_data *devdata; if (list_empty(&display->vbt.display_devices)) @@ -3423,7 +3344,7 @@ bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin) child->device_type != DEVICE_TYPE_LFP) continue; - if (intel_gmbus_is_valid_pin(i915, child->i2c_pin)) + if (intel_gmbus_is_valid_pin(display, child->i2c_pin)) *i2c_pin = child->i2c_pin; /* However, we cannot trust the BIOS writers to populate @@ -3671,17 +3592,16 @@ static const u8 direct_aux_ch_map[] = { static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel) { - struct drm_i915_private *i915 = 
to_i915(display->drm); const u8 *aux_ch_map; int i, n_entries; if (DISPLAY_VER(display) >= 13) { aux_ch_map = adlp_aux_ch_map; n_entries = ARRAY_SIZE(adlp_aux_ch_map); - } else if (IS_ALDERLAKE_S(i915)) { + } else if (display->platform.alderlake_s) { aux_ch_map = adls_aux_ch_map; n_entries = ARRAY_SIZE(adls_aux_ch_map); - } else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) { + } else if (display->platform.dg1 || display->platform.rocketlake) { aux_ch_map = rkl_aux_ch_map; n_entries = ARRAY_SIZE(rkl_aux_ch_map); } else { diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c new file mode 100644 index 000000000000..fbd16d7b58d9 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include "gem/i915_gem_mman.h" +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_object_frontbuffer.h" +#include "i915_debugfs.h" +#include "intel_bo.h" + +bool intel_bo_is_tiled(struct drm_gem_object *obj) +{ + return i915_gem_object_is_tiled(to_intel_bo(obj)); +} + +bool intel_bo_is_userptr(struct drm_gem_object *obj) +{ + return i915_gem_object_is_userptr(to_intel_bo(obj)); +} + +bool intel_bo_is_shmem(struct drm_gem_object *obj) +{ + return i915_gem_object_is_shmem(to_intel_bo(obj)); +} + +bool intel_bo_is_protected(struct drm_gem_object *obj) +{ + return i915_gem_object_is_protected(to_intel_bo(obj)); +} + +void intel_bo_flush_if_display(struct drm_gem_object *obj) +{ + i915_gem_object_flush_if_display(to_intel_bo(obj)); +} + +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + return i915_gem_fb_mmap(to_intel_bo(obj), vma); +} + +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size) +{ + return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size); +} + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj) +{ + return i915_gem_object_get_frontbuffer(to_intel_bo(obj)); +} + +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front) +{ + return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front); +} + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) +{ + i915_debugfs_describe_obj(m, to_intel_bo(obj)); +} diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h new file mode 100644 index 000000000000..ea7a2253aaa5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_bo.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_BO__ +#define __INTEL_BO__ + +#include <linux/types.h> + +struct drm_gem_object; +struct seq_file; +struct vm_area_struct; + +bool intel_bo_is_tiled(struct drm_gem_object *obj); +bool intel_bo_is_userptr(struct drm_gem_object *obj); +bool intel_bo_is_shmem(struct drm_gem_object *obj); +bool intel_bo_is_protected(struct drm_gem_object *obj); +void intel_bo_flush_if_display(struct drm_gem_object *obj); +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size); + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj); +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front); + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj); + +#endif /* __INTEL_BO__ */ 
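
The new intel_bo.[ch] above is a deliberately thin seam: display code handles only struct drm_gem_object, and each wrapper immediately converts to the driver-private object type, keeping the display core free of i915 GEM internals (and leaving room for a different backend to supply its own intel_bo implementation). A sketch of the wrapper pattern with hypothetical types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Generic object embedded in the driver-private one. */
struct gem_object { size_t size; };
struct i915_object { struct gem_object base; bool tiled; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct i915_object *to_priv(struct gem_object *obj)
{
	return container_of(obj, struct i915_object, base);
}

/* Display-facing wrapper: takes only the generic type. */
static bool bo_is_tiled(struct gem_object *obj)
{
	return to_priv(obj)->tiled;
}

int main(void)
{
	struct i915_object obj = { .base.size = 4096, .tiled = true };

	printf("tiled: %d\n", bo_is_tiled(&obj.base));
	return 0;
}
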
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 47036d4abb33..a52b0ae68b96 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -743,7 +743,7 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1) && IS_DGFX(dev_priv)) + if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info); else if (DISPLAY_VER(dev_priv) >= 14) tgl_get_bw_info(dev_priv, &mtl_sa_info); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index aa3ba66c5307..03c4eef3f92a 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/debugfs.h> #include <linux/time.h> #include <drm/drm_fixed.h> @@ -112,81 +113,81 @@ */ struct intel_cdclk_funcs { - void (*get_cdclk)(struct drm_i915_private *i915, + void (*get_cdclk)(struct intel_display *display, struct intel_cdclk_config *cdclk_config); - void (*set_cdclk)(struct drm_i915_private *i915, + void (*set_cdclk)(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); u8 (*calc_voltage_level)(int cdclk); }; -void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, +void intel_cdclk_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config); + display->funcs.cdclk->get_cdclk(display, cdclk_config); } -static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, +static void intel_cdclk_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe); + display->funcs.cdclk->set_cdclk(display, cdclk_config, pipe); } static int intel_cdclk_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(state); + return display->funcs.cdclk->modeset_calc_cdclk(state); } -static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, +static u8 intel_cdclk_calc_voltage_level(struct intel_display *display, int cdclk) { - return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk); + return display->funcs.cdclk->calc_voltage_level(cdclk); } -static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_133mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 133333; } -static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_200mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 200000; } -static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_266mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 266667; } -static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_333mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 333333; } -static void 
fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_400mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 400000; } -static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv, +static void fixed_450mhz_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 450000; } -static void i85x_get_cdclk(struct drm_i915_private *dev_priv, +static void i85x_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 hpllcc = 0; /* @@ -225,10 +226,10 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i915gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -249,10 +250,10 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i945gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -273,7 +274,7 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, } } -static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) +static unsigned int intel_hpll_vco(struct intel_display *display) { static const unsigned int blb_vco[8] = { [0] = 3200000, @@ -312,6 +313,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) [4] = 2666667, [5] = 4266667, }; + struct drm_i915_private *dev_priv = to_i915(display->drm); const unsigned int *vco_table; unsigned int vco; u8 tmp = 0; @@ -330,23 +332,23 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) else return 0; - tmp = intel_de_read(dev_priv, + tmp = intel_de_read(display, IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) - drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", + drm_err(display->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); else - drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco); + drm_dbg_kms(display->drm, "HPLL VCO %u kHz\n", vco); return vco; } -static void g33_get_cdclk(struct drm_i915_private *dev_priv, +static void g33_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; @@ -355,7 +357,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -386,16 +388,16 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, return; fail: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. 
HPLL VCO=%u kHz, CFGC=0x%08x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 190476; } -static void pnv_get_cdclk(struct drm_i915_private *dev_priv, +static void pnv_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -414,7 +416,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = 200000; break; default: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unknown pnv display core clock 0x%04x\n", gcfgc); fallthrough; case GC_DISPLAY_CLOCK_133_MHZ_PNV: @@ -426,10 +428,10 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, } } -static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, +static void i965gm_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); static const u8 div_3200[] = { 16, 10, 8 }; static const u8 div_4000[] = { 20, 12, 10 }; static const u8 div_5333[] = { 24, 16, 14 }; @@ -437,7 +439,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -465,20 +467,20 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, return; fail: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 200000; } -static void gm45_get_cdclk(struct drm_i915_private *dev_priv, +static void gm45_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); unsigned int cdclk_sel; u16 tmp = 0; - cdclk_config->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(display); pci_read_config_word(pdev, GCFGC, &tmp); @@ -494,7 +496,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = cdclk_sel ? 320000 : 228571; break; default: - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unable to determine CDCLK. 
HPLL VCO=%u, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 222222; @@ -502,15 +504,16 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, } } -static void hsw_get_cdclk(struct drm_i915_private *dev_priv, +static void hsw_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); + struct drm_i915_private *dev_priv = to_i915(display->drm); + u32 lcpll = intel_de_read(display, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; - else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; @@ -520,8 +523,9 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv, cdclk_config->cdclk = 540000; } -static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; @@ -540,8 +544,10 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) return 200000; } -static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) +static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (IS_VALLEYVIEW(dev_priv)) { if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ return 2; @@ -559,9 +565,10 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) } } -static void vlv_get_cdclk(struct drm_i915_private *dev_priv, +static void vlv_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val; vlv_iosf_sb_get(dev_priv, @@ -585,8 +592,9 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, DSPFREQGUAR_SHIFT_CHV; } -static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) +static void vlv_program_pfi_credits(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); unsigned int credits, default_credits; if (IS_CHERRYVIEW(dev_priv)) @@ -594,7 +602,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) else default_credits = PFI_CREDIT(8); - if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) { /* CHV suggested value is 31 or 63 */ if (IS_CHERRYVIEW(dev_priv)) credits = PFI_CREDIT_63; @@ -608,24 +616,25 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) * WA - write default credits before re-programming * FIXME: should we also set the resend bit here? */ - intel_de_write(dev_priv, GCI_CONTROL, + intel_de_write(display, GCI_CONTROL, VGA_FAST_MODE_DISABLE | default_credits); - intel_de_write(dev_priv, GCI_CONTROL, + intel_de_write(display, GCI_CONTROL, VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND); /* * FIXME is this guaranteed to clear * immediately or should we poll for it? 
*/ - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND); + drm_WARN_ON(display->drm, + intel_de_read(display, GCI_CONTROL) & PFI_CREDIT_RESEND); } -static void vlv_set_cdclk(struct drm_i915_private *dev_priv, +static void vlv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -662,7 +671,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } @@ -681,7 +690,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } @@ -704,17 +713,18 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, BIT(VLV_IOSF_SB_BUNIT) | BIT(VLV_IOSF_SB_PUNIT)); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - vlv_program_pfi_credits(dev_priv); + vlv_program_pfi_credits(display); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } -static void chv_set_cdclk(struct drm_i915_private *dev_priv, +static void chv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -746,15 +756,15 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for CDclk change\n"); } vlv_punit_put(dev_priv); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - vlv_program_pfi_credits(dev_priv); + vlv_program_pfi_credits(display); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } @@ -786,15 +796,15 @@ static u8 bdw_calc_voltage_level(int cdclk) } } -static void bdw_get_cdclk(struct drm_i915_private *dev_priv, +static void bdw_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); + u32 lcpll = intel_de_read(display, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; - else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; @@ -830,15 +840,16 @@ static u32 bdw_cdclk_freq_sel(int cdclk) } } -static void bdw_set_cdclk(struct drm_i915_private *dev_priv, +static void bdw_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int ret; - if (drm_WARN(&dev_priv->drm, - (intel_de_read(dev_priv, LCPLL_CTL) & + if (drm_WARN(display->drm, + (intel_de_read(display, LCPLL_CTL) & (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | 
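
The vlv/chv cdclk updates above are request/acknowledge handshakes with the Punit: write the voltage-level command, then poll until the status field echoes it back, reporting a timeout otherwise. A sketch of that poll-with-timeout idiom against a fake register (bit layout invented):

#include <stdbool.h>
#include <stdio.h>

static unsigned int punit_reg; /* fake Punit register */

static void punit_write(unsigned int cmd)
{
	/* request in the low bits; pretend hw acks by mirroring to status */
	punit_reg = cmd | (cmd << 8);
}

static bool wait_for_ack(unsigned int cmd, int tries)
{
	while (tries--) {
		if (((punit_reg >> 8) & 0xff) == cmd)
			return true;
		/* real code would sleep or relax the cpu here */
	}
	return false;
}

int main(void)
{
	unsigned int cmd = 2; /* e.g. highest voltage level */

	punit_write(cmd);
	if (!wait_for_ack(cmd, 50))
		fprintf(stderr, "timed out waiting for CDclk change\n");
	else
		printf("cdclk change acked\n");
	return 0;
}
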
LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | @@ -848,39 +859,39 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "failed to inform pcode about cdclk change\n"); return; } - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_CD_SOURCE_FCLK); /* * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. */ - if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & + if (wait_for_us(intel_de_read(display, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 100)) - drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); + drm_err(display->drm, "Switching to FCLK failed\n"); - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk)); - intel_de_rmw(dev_priv, LCPLL_CTL, + intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & + if (wait_for_us((intel_de_read(display, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) - drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); + drm_err(display->drm, "Switching back to LCPLL failed\n"); snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level); - intel_de_write(dev_priv, CDCLK_FREQ, + intel_de_write(display, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); } static int skl_calc_cdclk(int min_cdclk, int vco) @@ -918,7 +929,7 @@ static u8 skl_calc_voltage_level(int cdclk) return 0; } -static void skl_dpll0_update(struct drm_i915_private *dev_priv, +static void skl_dpll0_update(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 val; @@ -926,16 +937,16 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, cdclk_config->ref = 24000; cdclk_config->vco = 0; - val = intel_de_read(dev_priv, LCPLL1_CTL); + val = intel_de_read(display, LCPLL1_CTL); if ((val & LCPLL_PLL_ENABLE) == 0) return; - if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0)) + if (drm_WARN_ON(display->drm, (val & LCPLL_PLL_LOCK) == 0)) return; - val = intel_de_read(dev_priv, DPLL_CTRL1); + val = intel_de_read(display, DPLL_CTRL1); - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != @@ -959,19 +970,19 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, } } -static void skl_get_cdclk(struct drm_i915_private *dev_priv, +static void skl_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 cdctl; - skl_dpll0_update(dev_priv, cdclk_config); + skl_dpll0_update(display, cdclk_config); cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref; if (cdclk_config->vco == 0) goto out; - cdctl = intel_de_read(dev_priv, CDCLK_CTL); + cdctl = intel_de_read(display, CDCLK_CTL); if (cdclk_config->vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { @@ -1026,19 +1037,19 @@ static int skl_cdclk_decimal(int cdclk) return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco) +static void skl_set_preferred_cdclk_vco(struct intel_display *display, int vco) { - bool changed = 
i915->display.cdclk.skl_preferred_vco_freq != vco; + bool changed = display->cdclk.skl_preferred_vco_freq != vco; - i915->display.cdclk.skl_preferred_vco_freq = vco; + display->cdclk.skl_preferred_vco_freq = vco; if (changed) - intel_update_max_cdclk(i915); + intel_update_max_cdclk(display); } -static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) +static u32 skl_dpll0_link_rate(struct intel_display *display, int vco) { - drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); + drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000); /* * We always enable DPLL0 with the lowest link rate possible, but still @@ -1055,47 +1066,47 @@ static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0); } -static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) +static void skl_dpll0_enable(struct intel_display *display, int vco) { - intel_de_rmw(dev_priv, DPLL_CTRL1, + intel_de_rmw(display, DPLL_CTRL1, DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0), DPLL_CTRL1_OVERRIDE(SKL_DPLL0) | - skl_dpll0_link_rate(dev_priv, vco)); - intel_de_posting_read(dev_priv, DPLL_CTRL1); + skl_dpll0_link_rate(display, vco)); + intel_de_posting_read(display, DPLL_CTRL1); - intel_de_rmw(dev_priv, LCPLL1_CTL, + intel_de_rmw(display, LCPLL1_CTL, 0, LCPLL_PLL_ENABLE); - if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) - drm_err(&dev_priv->drm, "DPLL0 not locked\n"); + if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) + drm_err(display->drm, "DPLL0 not locked\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; /* We'll want to keep using the current vco from now on. */ - skl_set_preferred_cdclk_vco(dev_priv, vco); + skl_set_preferred_cdclk_vco(display, vco); } -static void skl_dpll0_disable(struct drm_i915_private *dev_priv) +static void skl_dpll0_disable(struct intel_display *display) { - intel_de_rmw(dev_priv, LCPLL1_CTL, + intel_de_rmw(display, LCPLL1_CTL, LCPLL_PLL_ENABLE, 0); - if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); + if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) + drm_err(display->drm, "Couldn't disable DPLL0\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, +static u32 skl_cdclk_freq_sel(struct intel_display *display, int cdclk, int vco) { switch (cdclk) { default: - drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->display.cdclk.hw.bypass); - drm_WARN_ON(&dev_priv->drm, vco != 0); + drm_WARN_ON(display->drm, + cdclk != display->cdclk.hw.bypass); + drm_WARN_ON(display->drm, vco != 0); fallthrough; case 308571: case 337500: @@ -1111,10 +1122,11 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, } } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, +static void skl_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 freq_select, cdclk_ctl; @@ -1128,7 +1140,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, * use the corresponding VCO freq as that always leads to using the * minimum 308MHz CDCLK. 
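*
* For reference, the decimal field written into CDCLK_CTL below comes
* from skl_cdclk_decimal() (unchanged by this hunk), which encodes the
* frequency as DIV_ROUND_CLOSEST(cdclk - 1000, 500), i.e. (MHz - 1)
* in half-MHz steps:
*
*   308571 kHz -> DIV_ROUND_CLOSEST(307571, 500) = 615
*   675000 kHz -> DIV_ROUND_CLOSEST(674000, 500) = 1348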
*/ - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, IS_SKYLAKE(dev_priv) && vco == 8640000); ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, @@ -1136,54 +1148,54 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to inform PCU about cdclk change (%d)\n", ret); return; } - freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco); + freq_select = skl_cdclk_freq_sel(display, cdclk, vco); - if (dev_priv->display.cdclk.hw.vco != 0 && - dev_priv->display.cdclk.hw.vco != vco) - skl_dpll0_disable(dev_priv); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + skl_dpll0_disable(display); - cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); + cdclk_ctl = intel_de_read(display, CDCLK_CTL); - if (dev_priv->display.cdclk.hw.vco != vco) { + if (display->cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); } /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); - intel_de_posting_read(dev_priv, CDCLK_CTL); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(display, CDCLK_CTL); - if (dev_priv->display.cdclk.hw.vco != vco) - skl_dpll0_enable(dev_priv, vco); + if (display->cdclk.hw.vco != vco) + skl_dpll0_enable(display, vco); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; - intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); - intel_de_posting_read(dev_priv, CDCLK_CTL); + intel_de_write(display, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(display, CDCLK_CTL); /* inform PCU of the change */ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); } -static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct intel_display *display) { u32 cdctl, expected; @@ -1192,15 +1204,15 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * There is SWF18 scratchpad register defined which is set by the * pre-os which can be used by the OS drivers to check the status */ - if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) + if ((intel_de_read(display, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? 
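*
* If any of the checks here or below fail, the tail of this function
* poisons the software state with the two sentinels used throughout
* this file: hw.cdclk = 0 ("unknown, force reprogramming") and
* hw.vco = ~0 ("unknown PLL state, force a full disable + enable",
* cf. cdclk_pll_is_unknown() further down).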
*/ - if (dev_priv->display.cdclk.hw.vco == 0 || - dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) + if (display->cdclk.hw.vco == 0 || + display->cdclk.hw.cdclk == display->cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1209,60 +1221,60 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. */ - cdctl = intel_de_read(dev_priv, CDCLK_CTL); + cdctl = intel_de_read(display, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | - skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk); + skl_cdclk_decimal(display->cdclk.hw.cdclk); if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: - drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->display.cdclk.hw.cdclk = 0; + display->cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = ~0; + display->cdclk.hw.vco = ~0; } -static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) +static void skl_cdclk_init_hw(struct intel_display *display) { struct intel_cdclk_config cdclk_config; - skl_sanitize_cdclk(dev_priv); + skl_sanitize_cdclk(display); - if (dev_priv->display.cdclk.hw.cdclk != 0 && - dev_priv->display.cdclk.hw.vco != 0) { + if (display->cdclk.hw.cdclk != 0 && + display->cdclk.hw.vco != 0) { /* * Use the current vco as our initial * guess as to what the preferred vco is. */ - if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0) - skl_set_preferred_cdclk_vco(dev_priv, - dev_priv->display.cdclk.hw.vco); + if (display->cdclk.skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(display, + display->cdclk.hw.vco); return; } - cdclk_config = dev_priv->display.cdclk.hw; + cdclk_config = display->cdclk.hw; - cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq; + cdclk_config.vco = display->cdclk.skl_preferred_vco_freq; if (cdclk_config.vco == 0) cdclk_config.vco = 8100000; cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco); cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + skl_set_cdclk(display, &cdclk_config, INVALID_PIPE); } -static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) +static void skl_cdclk_uninit_hw(struct intel_display *display) { - struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; + struct intel_cdclk_config cdclk_config = display->cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + skl_set_cdclk(display, &cdclk_config, INVALID_PIPE); } struct intel_cdclk_vals { @@ -1456,6 +1468,39 @@ static const struct intel_cdclk_vals xe2hpd_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = { + { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 268800, .ratio 
= 16, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 326400, .ratio = 17, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 345600, .ratio = 18, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 364800, .ratio = 19, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 384000, .ratio = 20, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff }, + { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff }, + {} +}; + static const int cdclk_squash_len = 16; static int cdclk_squash_divider(u16 waveform) @@ -1470,37 +1515,37 @@ static int cdclk_divider(int cdclk, int vco, u16 waveform) cdclk * cdclk_squash_len); } -static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk >= min_cdclk) return table[i].cdclk; - drm_WARN(&dev_priv->drm, 1, + drm_WARN(display->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", - min_cdclk, dev_priv->display.cdclk.hw.ref); + min_cdclk, display->cdclk.hw.ref); return 0; } -static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) +static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; - if (cdclk == dev_priv->display.cdclk.hw.bypass) + if (cdclk == display->cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk == cdclk) - return dev_priv->display.cdclk.hw.ref * table[i].ratio; + return display->cdclk.hw.ref * table[i].ratio; - drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->display.cdclk.hw.ref); + drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, display->cdclk.hw.ref); return 0; } @@ -1582,10 +1627,20 @@ static u8 rplu_calc_voltage_level(int cdclk) rplu_voltage_level_max_cdclk); } -static void 
icl_readout_refclk(struct drm_i915_private *dev_priv, +static u8 xe3lpd_calc_voltage_level(int cdclk) +{ + /* + * Starting with xe3lpd power controller does not need the voltage + * index when doing the modeset update. This function is best left + * defined but returning 0 to the mask. + */ + return 0; +} + +static void icl_readout_refclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { - u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; + u32 dssm = intel_de_read(display, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; switch (dssm) { default: @@ -1603,19 +1658,20 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv, } } -static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, +static void bxt_de_pll_readout(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val, ratio; if (IS_DG2(dev_priv)) cdclk_config->ref = 38400; - else if (DISPLAY_VER(dev_priv) >= 11) - icl_readout_refclk(dev_priv, cdclk_config); + else if (DISPLAY_VER(display) >= 11) + icl_readout_refclk(display, cdclk_config); else cdclk_config->ref = 19200; - val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE); + val = intel_de_read(display, BXT_DE_PLL_ENABLE); if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || (val & BXT_DE_PLL_LOCK) == 0) { /* @@ -1630,26 +1686,26 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, * DISPLAY_VER >= 11 have the ratio directly in the PLL enable register, * gen9lp had it in a separate PLL control register. */ - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) ratio = val & ICL_CDCLK_PLL_RATIO_MASK; else - ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; + ratio = intel_de_read(display, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; cdclk_config->vco = ratio * cdclk_config->ref; } -static void bxt_get_cdclk(struct drm_i915_private *dev_priv, +static void bxt_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config) { u32 squash_ctl = 0; u32 divider; int div; - bxt_de_pll_readout(dev_priv, cdclk_config); + bxt_de_pll_readout(display, cdclk_config); - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) cdclk_config->bypass = cdclk_config->ref / 2; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) cdclk_config->bypass = 50000; else cdclk_config->bypass = cdclk_config->ref; @@ -1659,7 +1715,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, goto out; } - divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; + divider = intel_de_read(display, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: @@ -1679,8 +1735,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, return; } - if (HAS_CDCLK_SQUASH(dev_priv)) - squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); + if (HAS_CDCLK_SQUASH(display)) + squash_ctl = intel_de_read(display, CDCLK_SQUASH_CTL); if (squash_ctl & CDCLK_SQUASH_ENABLE) { u16 waveform; @@ -1696,107 +1752,107 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, } out: - if (DISPLAY_VER(dev_priv) >= 20) - cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN; + if (DISPLAY_VER(display) >= 20) + cdclk_config->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN; /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. 
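*
* The cdclk computed above already folds the squash waveform in:
* assuming a cd2x divider of 1, the effective frequency works out to
* vco * cdclk_squash_divider(waveform) / (cdclk_squash_len * 2).
* Checking against the xe3lpd table: vco = 38400 * 16 = 614400, so
* waveform 0xffff (16 bits set) gives 614400 * 16 / 32 = 307200 kHz,
* and waveform 0xaaaa (8 bits set) gives 614400 * 8 / 32 = 153600 kHz,
* matching the rows added above.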
*/ cdclk_config->voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config->cdclk); } -static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +static void bxt_de_pll_disable(struct intel_display *display) { - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0); + intel_de_write(display, BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(dev_priv, + if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); + drm_err(display->drm, "timeout waiting for DE PLL unlock\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +static void bxt_de_pll_enable(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); - intel_de_rmw(dev_priv, BXT_DE_PLL_CTL, + intel_de_rmw(display, BXT_DE_PLL_CTL, BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio)); - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); + drm_err(display->drm, "timeout waiting for DE PLL lock\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) +static void icl_cdclk_pll_disable(struct intel_display *display) { - intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE, + intel_de_rmw(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n"); + if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n"); - dev_priv->display.cdclk.hw.vco = 0; + display->cdclk.hw.vco = 0; } -static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) +static void icl_cdclk_pll_enable(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); u32 val; val = ICL_CDCLK_PLL_RATIO(ratio); - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); val |= BXT_DE_PLL_PLL_ENABLE; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n"); + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) + drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n"); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) +static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref); u32 val; /* Write PLL ratio without disabling */ val = 
ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Submit freq change request */ val |= BXT_DE_PLL_FREQ_REQ; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, + if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) - drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n"); + drm_err(display->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; - intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); + intel_de_write(display, BXT_DE_PLL_ENABLE, val); - dev_priv->display.cdclk.hw.vco = vco; + display->cdclk.hw.vco = vco; } -static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +static u32 bxt_cdclk_cd2x_pipe(struct intel_display *display, enum pipe pipe) { - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (pipe == INVALID_PIPE) return TGL_CDCLK_CD2X_PIPE_NONE; else return TGL_CDCLK_CD2X_PIPE(pipe); - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { if (pipe == INVALID_PIPE) return ICL_CDCLK_CD2X_PIPE_NONE; else @@ -1809,15 +1865,15 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe } } -static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, +static u32 bxt_cdclk_cd2x_div_sel(struct intel_display *display, int cdclk, int vco, u16 waveform) { /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (cdclk_divider(cdclk, vco, waveform)) { default: - drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->display.cdclk.hw.bypass); - drm_WARN_ON(&dev_priv->drm, vco != 0); + drm_WARN_ON(display->drm, + cdclk != display->cdclk.hw.bypass); + drm_WARN_ON(display->drm, vco != 0); fallthrough; case 2: return BXT_CDCLK_CD2X_DIV_SEL_1; @@ -1830,47 +1886,47 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } -static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv, +static u16 cdclk_squash_waveform(struct intel_display *display, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; + const struct intel_cdclk_vals *table = display->cdclk.table; int i; - if (cdclk == dev_priv->display.cdclk.hw.bypass) + if (cdclk == display->cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->display.cdclk.hw.ref && + if (table[i].refclk == display->cdclk.hw.ref && table[i].cdclk == cdclk) return table[i].waveform; - drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->display.cdclk.hw.ref); + drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, display->cdclk.hw.ref); return 0xffff; } -static void icl_cdclk_pll_update(struct drm_i915_private *i915, int vco) +static void icl_cdclk_pll_update(struct intel_display *display, int vco) { - if (i915->display.cdclk.hw.vco != 0 && - i915->display.cdclk.hw.vco != vco) - icl_cdclk_pll_disable(i915); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + icl_cdclk_pll_disable(display); - if (i915->display.cdclk.hw.vco != vco) - icl_cdclk_pll_enable(i915, vco); + if (display->cdclk.hw.vco != vco) + icl_cdclk_pll_enable(display, vco); } -static void bxt_cdclk_pll_update(struct drm_i915_private *i915, int vco) +static void bxt_cdclk_pll_update(struct intel_display *display, 
int vco) { - if (i915->display.cdclk.hw.vco != 0 && - i915->display.cdclk.hw.vco != vco) - bxt_de_pll_disable(i915); + if (display->cdclk.hw.vco != 0 && + display->cdclk.hw.vco != vco) + bxt_de_pll_disable(display); - if (i915->display.cdclk.hw.vco != vco) - bxt_de_pll_enable(i915, vco); + if (display->cdclk.hw.vco != vco) + bxt_de_pll_enable(display, vco); } -static void dg2_cdclk_squash_program(struct drm_i915_private *i915, +static void dg2_cdclk_squash_program(struct intel_display *display, u16 waveform) { u32 squash_ctl = 0; @@ -1879,7 +1935,7 @@ static void dg2_cdclk_squash_program(struct drm_i915_private *i915, squash_ctl = CDCLK_SQUASH_ENABLE | CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; - intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl); + intel_de_write(display, CDCLK_SQUASH_CTL, squash_ctl); } static bool cdclk_pll_is_unknown(unsigned int vco) @@ -1892,38 +1948,40 @@ static bool cdclk_pll_is_unknown(unsigned int vco) return vco == ~0; } -static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915) +static bool mdclk_source_is_cdclk_pll(struct intel_display *display) { - return DISPLAY_VER(i915) >= 20; + return DISPLAY_VER(display) >= 20; } -static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915) +static u32 xe2lpd_mdclk_source_sel(struct intel_display *display) { - if (mdclk_source_is_cdclk_pll(i915)) + if (mdclk_source_is_cdclk_pll(display)) return MDCLK_SOURCE_SEL_CDCLK_PLL; return MDCLK_SOURCE_SEL_CD2XCLK; } -int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, +int intel_mdclk_cdclk_ratio(struct intel_display *display, const struct intel_cdclk_config *cdclk_config) { - if (mdclk_source_is_cdclk_pll(i915)) + if (mdclk_source_is_cdclk_pll(display)) return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk); /* Otherwise, source for MDCLK is CD2XCLK. */ return 2; } -static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915, +static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display, const struct intel_cdclk_config *cdclk_config) { + struct drm_i915_private *i915 = to_i915(display->drm); + intel_dbuf_mdclk_cdclk_ratio_update(i915, - intel_mdclk_cdclk_ratio(i915, cdclk_config), + intel_mdclk_cdclk_ratio(display, cdclk_config), cdclk_config->joined_mbus); } -static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915, +static bool cdclk_compute_crawl_and_squash_midpoint(struct intel_display *display, const struct intel_cdclk_config *old_cdclk_config, const struct intel_cdclk_config *new_cdclk_config, struct intel_cdclk_config *mid_cdclk_config) @@ -1936,11 +1994,11 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 return false; /* Return if both Squash and Crawl are not present */ - if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) + if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display)) return false; - old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk); - new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk); + old_waveform = cdclk_squash_waveform(display, old_cdclk_config->cdclk); + new_waveform = cdclk_squash_waveform(display, new_cdclk_config->cdclk); /* Return if Squash only or Crawl only is the desired action */ if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 || @@ -1957,7 +2015,7 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 * Should not happen currently. We might need more midpoint * transitions if we need to also change the cd2x divider. 
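*
* The midpoint changes only one thing at a time: it pairs one
* endpoint's squash waveform with the other endpoint's vco, so each
* _bxt_set_cdclk() step is a pure crawl or a pure squash. The sanity
* checks below then ensure the intermediate cdclk never drops under
* min(old, new) nor exceeds the platform maximum.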
*/ - if (drm_WARN_ON(&i915->drm, old_div != new_div)) + if (drm_WARN_ON(display->drm, old_div != new_div)) return false; *mid_cdclk_config = *new_cdclk_config; @@ -1986,37 +2044,40 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 /* make sure the mid clock came out sane */ - drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk < + drm_WARN_ON(display->drm, mid_cdclk_config->cdclk < min(old_cdclk_config->cdclk, new_cdclk_config->cdclk)); - drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk > - i915->display.cdclk.max_cdclk_freq); - drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) != + drm_WARN_ON(display->drm, mid_cdclk_config->cdclk > + display->cdclk.max_cdclk_freq); + drm_WARN_ON(display->drm, cdclk_squash_waveform(display, mid_cdclk_config->cdclk) != mid_waveform); return true; } -static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) +static bool pll_enable_wa_needed(struct intel_display *display) { - return (DISPLAY_VER_FULL(dev_priv) == IP_VER(20, 0) || - DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0) || + struct drm_i915_private *dev_priv = to_i915(display->drm); + + return (DISPLAY_VERx100(display) == 2000 || + DISPLAY_VERx100(display) == 1400 || IS_DG2(dev_priv)) && - dev_priv->display.cdclk.hw.vco > 0; + display->cdclk.hw.vco > 0; } -static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, +static u32 bxt_cdclk_ctl(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *i915 = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u16 waveform; u32 val; - waveform = cdclk_squash_waveform(i915, cdclk); + waveform = cdclk_squash_waveform(display, cdclk); - val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) | - bxt_cdclk_cd2x_pipe(i915, pipe); + val = bxt_cdclk_cd2x_div_sel(display, cdclk, vco, waveform) | + bxt_cdclk_cd2x_pipe(display, pipe); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, @@ -2026,50 +2087,52 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, cdclk >= 500000) val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (DISPLAY_VER(i915) >= 20) - val |= xe2lpd_mdclk_source_sel(i915); + if (DISPLAY_VER(display) >= 20) + val |= xe2lpd_mdclk_source_sel(display); else val |= skl_cdclk_decimal(cdclk); return val; } -static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, +static void _bxt_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; - if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 && - !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { - if (dev_priv->display.cdclk.hw.vco != vco) - adlp_cdclk_pll_crawl(dev_priv, vco); - } else if (DISPLAY_VER(dev_priv) >= 11) { + if (HAS_CDCLK_CRAWL(display) && display->cdclk.hw.vco > 0 && vco > 0 && + !cdclk_pll_is_unknown(display->cdclk.hw.vco)) { + if (display->cdclk.hw.vco != vco) + adlp_cdclk_pll_crawl(display, vco); + } else if (DISPLAY_VER(display) >= 11) { /* wa_15010685871: dg2, mtl */ - if (pll_enable_wa_needed(dev_priv)) - dg2_cdclk_squash_program(dev_priv, 0); + if (pll_enable_wa_needed(display)) + dg2_cdclk_squash_program(display, 0); - icl_cdclk_pll_update(dev_priv, vco); - } else - bxt_cdclk_pll_update(dev_priv, vco); + icl_cdclk_pll_update(display, vco); + } else { + bxt_cdclk_pll_update(display, vco); + } - if (HAS_CDCLK_SQUASH(dev_priv)) { - u16 waveform = 
cdclk_squash_waveform(dev_priv, cdclk); + if (HAS_CDCLK_SQUASH(display)) { + u16 waveform = cdclk_squash_waveform(display, cdclk); - dg2_cdclk_squash_program(dev_priv, waveform); + dg2_cdclk_squash_program(display, waveform); } - intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe)); + intel_de_write(display, CDCLK_CTL, bxt_cdclk_ctl(display, cdclk_config, pipe)); if (pipe != INVALID_PIPE) - intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe)); } -static void bxt_set_cdclk(struct drm_i915_private *dev_priv, +static void bxt_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_config mid_cdclk_config; int cdclk = cdclk_config->cdclk; int ret = 0; @@ -2080,9 +2143,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * mailbox communication, skip * this step. */ - if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) + if (DISPLAY_VER(display) >= 14 || IS_DG2(dev_priv)) /* NOOP */; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, @@ -2097,35 +2160,35 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, 0x80000000, 150, 2); if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to inform PCU about cdclk change (err %d, freq %d)\n", ret, cdclk); return; } - if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk) - xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (DISPLAY_VER(display) >= 20 && cdclk < display->cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config); - if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw, + if (cdclk_compute_crawl_and_squash_midpoint(display, &display->cdclk.hw, cdclk_config, &mid_cdclk_config)) { - _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe); - _bxt_set_cdclk(dev_priv, cdclk_config, pipe); + _bxt_set_cdclk(display, &mid_cdclk_config, pipe); + _bxt_set_cdclk(display, cdclk_config, pipe); } else { - _bxt_set_cdclk(dev_priv, cdclk_config, pipe); + _bxt_set_cdclk(display, cdclk_config, pipe); } - if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk) - xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (DISPLAY_VER(display) >= 20 && cdclk > display->cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) /* * NOOP - No Pcode communication needed for * Display versions 14 and beyond */; - else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv)) + else if (DISPLAY_VER(display) >= 11 && !IS_DG2(dev_priv)) ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); - if (DISPLAY_VER(dev_priv) < 11) { + if (DISPLAY_VER(display) < 11) { /* * The timeout isn't specified, the 2ms used here is based on * experiment. 
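Taken together, the hunks above and below give bxt_set_cdclk() the following overall shape after the conversion. This is a condensed sketch for orientation only; the pcode details, error handling and exact DISPLAY_VER cutoffs are as in the real function, and the sketch's function name is made up:

static void bxt_set_cdclk_sketch(struct intel_display *display,
				 const struct intel_cdclk_config *new_config,
				 enum pipe pipe)
{
	struct intel_cdclk_config mid_config;

	/* 1) pre-notify pcode where required (DISPLAY_VER < 14, not DG2) */

	/* 2) when cdclk decreases, lower the MDCLK/CDCLK ratio first (ver >= 20) */

	/* 3) program the hardware, via a midpoint when both crawling and
	 *    squashing are needed to reach the new config */
	if (cdclk_compute_crawl_and_squash_midpoint(display, &display->cdclk.hw,
						    new_config, &mid_config)) {
		_bxt_set_cdclk(display, &mid_config, pipe);
		_bxt_set_cdclk(display, new_config, pipe);
	} else {
		_bxt_set_cdclk(display, new_config, pipe);
	}

	/* 4) when cdclk increases, raise the ratio only now; finally report
	 *    the new voltage level to pcode and re-read the hardware state */
}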
@@ -2138,42 +2201,42 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, 150, 2); } if (ret) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "PCode CDCLK freq set failed, (err %d, freq %d)\n", ret, cdclk); return; } - intel_update_cdclk(dev_priv); + intel_update_cdclk(display); - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) /* * Can't read out the voltage level :( * Let's just assume everything is as expected. */ - dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level; + display->cdclk.hw.voltage_level = cdclk_config->voltage_level; } -static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct intel_display *display) { u32 cdctl, expected; int cdclk, vco; - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); - if (dev_priv->display.cdclk.hw.vco == 0 || - dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) + if (display->cdclk.hw.vco == 0 || + display->cdclk.hw.cdclk == display->cdclk.hw.bypass) goto sanitize; /* Make sure this is a legal cdclk value for the platform */ - cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk); - if (cdclk != dev_priv->display.cdclk.hw.cdclk) + cdclk = bxt_calc_cdclk(display, display->cdclk.hw.cdclk); + if (cdclk != display->cdclk.hw.cdclk) goto sanitize; /* Make sure the VCO is correct for the cdclk */ - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - if (vco != dev_priv->display.cdclk.hw.vco) + vco = bxt_calc_cdclk_pll_vco(display, cdclk); + if (vco != display->cdclk.hw.vco) goto sanitize; /* @@ -2181,129 +2244,133 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, * so sanitize this register. */ - cdctl = intel_de_read(dev_priv, CDCLK_CTL); - expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE); + cdctl = intel_de_read(display, CDCLK_CTL); + expected = bxt_cdclk_ctl(display, &display->cdclk.hw, INVALID_PIPE); /* * Let's ignore the pipe field, since BIOS could have configured the * dividers both synching to an active pipe, or asynchronously * (PIPE_NONE). */ - cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); - expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); + cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE); + expected &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE); if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: - drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->display.cdclk.hw.cdclk = 0; + display->cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = ~0; + display->cdclk.hw.vco = ~0; } -static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) +static void bxt_cdclk_init_hw(struct intel_display *display) { struct intel_cdclk_config cdclk_config; - bxt_sanitize_cdclk(dev_priv); + bxt_sanitize_cdclk(display); - if (dev_priv->display.cdclk.hw.cdclk != 0 && - dev_priv->display.cdclk.hw.vco != 0) + if (display->cdclk.hw.cdclk != 0 && + display->cdclk.hw.vco != 0) return; - cdclk_config = dev_priv->display.cdclk.hw; + cdclk_config = display->cdclk.hw; /* * FIXME: * - The initial CDCLK needs to be read from VBT. 
* Need to make this change after VBT has changes for BXT. */ - cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); - cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); + cdclk_config.cdclk = bxt_calc_cdclk(display, 0); + cdclk_config.vco = bxt_calc_cdclk_pll_vco(display, cdclk_config.cdclk); cdclk_config.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE); } -static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) +static void bxt_cdclk_uninit_hw(struct intel_display *display) { - struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; + struct intel_cdclk_config cdclk_config = display->cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); + bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE); } /** * intel_cdclk_init_hw - Initialize CDCLK hardware - * @i915: i915 device + * @display: display instance * - * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and + * Initialize CDCLK. This consists mainly of initializing display->cdclk.hw and * sanitizing the state of the hardware if needed. This is generally done only * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. */ -void intel_cdclk_init_hw(struct drm_i915_private *i915) +void intel_cdclk_init_hw(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) - bxt_cdclk_init_hw(i915); - else if (DISPLAY_VER(i915) == 9) - skl_cdclk_init_hw(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915)) + bxt_cdclk_init_hw(display); + else if (DISPLAY_VER(display) == 9) + skl_cdclk_init_hw(display); } /** * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware - * @i915: i915 device + * @display: display instance * * Uninitialize CDCLK. This is done only during the display core * uninitialization sequence. 
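*
* Both the bxt and skl variants park the clock on its bypass frequency
* with the PLL off (cdclk = bypass, vco = 0) as their final
* programming step.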
*/ -void intel_cdclk_uninit_hw(struct drm_i915_private *i915) +void intel_cdclk_uninit_hw(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) - bxt_cdclk_uninit_hw(i915); - else if (DISPLAY_VER(i915) == 9) - skl_cdclk_uninit_hw(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915)) + bxt_cdclk_uninit_hw(display); + else if (DISPLAY_VER(display) == 9) + skl_cdclk_uninit_hw(display); } -static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915, +static bool intel_cdclk_can_crawl_and_squash(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { u16 old_waveform; u16 new_waveform; - drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco)); + drm_WARN_ON(display->drm, cdclk_pll_is_unknown(a->vco)); if (a->vco == 0 || b->vco == 0) return false; - if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) + if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display)) return false; - old_waveform = cdclk_squash_waveform(i915, a->cdclk); - new_waveform = cdclk_squash_waveform(i915, b->cdclk); + old_waveform = cdclk_squash_waveform(display, a->cdclk); + new_waveform = cdclk_squash_waveform(display, b->cdclk); return a->vco != b->vco && old_waveform != new_waveform; } -static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_crawl(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { int a_div, b_div; - if (!HAS_CDCLK_CRAWL(dev_priv)) + if (!HAS_CDCLK_CRAWL(display)) return false; /* @@ -2319,7 +2386,7 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, a->ref == b->ref; } -static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_squash(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { @@ -2329,7 +2396,7 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, * the moment all platforms with squasher use a fixed cd2x * divider. */ - if (!HAS_CDCLK_SQUASH(dev_priv)) + if (!HAS_CDCLK_SQUASH(display)) return false; return a->cdclk != b->cdclk && @@ -2358,7 +2425,7 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, /** * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK * configurations requires only a cd2x divider update - * @dev_priv: i915 device + * @display: display instance * @a: first CDCLK configuration * @b: second CDCLK configuration * @@ -2366,12 +2433,14 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, * True if changing between the two CDCLK configurations * can be done with just a cd2x divider update, false if not. */ -static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, +static bool intel_cdclk_can_cd2x_update(struct intel_display *display, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + /* Older hw doesn't have the capability */ - if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) + if (DISPLAY_VER(display) < 10 && !IS_BROXTON(dev_priv)) return false; /* @@ -2380,7 +2449,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, * the moment all platforms with squasher use a fixed cd2x * divider. 
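*
* For orientation: intel_modeset_calc_cdclk() further down tries the
* available strategies cheapest first (crawl + squash, squash only,
* crawl only, a cd2x divider update synced to the one active pipe,
* and finally a full modeset with every pipe disabled).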
*/ - if (HAS_CDCLK_SQUASH(dev_priv)) + if (HAS_CDCLK_SQUASH(display)) return false; return a->cdclk != b->cdclk && @@ -2404,23 +2473,24 @@ static bool intel_cdclk_changed(const struct intel_cdclk_config *a, a->voltage_level != b->voltage_level; } -void intel_cdclk_dump_config(struct drm_i915_private *i915, +void intel_cdclk_dump_config(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, const char *context) { - drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", + drm_dbg_kms(display->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", context, cdclk_config->cdclk, cdclk_config->vco, cdclk_config->ref, cdclk_config->bypass, cdclk_config->voltage_level); } -static void intel_pcode_notify(struct drm_i915_private *i915, +static void intel_pcode_notify(struct intel_display *display, u8 voltage_level, u8 active_pipe_count, u16 cdclk, bool cdclk_update_valid, bool pipe_count_update_valid) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 update_mask = 0; @@ -2441,26 +2511,27 @@ static void intel_pcode_notify(struct drm_i915_private *i915, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) - drm_err(&i915->drm, + drm_err(display->drm, "Failed to inform PCU about display config (err %d)\n", ret); } -static void intel_set_cdclk(struct drm_i915_private *dev_priv, +static void intel_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe, const char *context) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config)) + if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) + if (drm_WARN_ON_ONCE(display->drm, !display->funcs.cdclk->set_cdclk)) return; - intel_cdclk_dump_config(dev_priv, cdclk_config, context); + intel_cdclk_dump_config(display, cdclk_config, context); - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_pause(intel_dp); @@ -2473,24 +2544,24 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, * functions use cdclk. Not all platforms/ports do, * but we'll lock them all for simplicity. 
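*
* The likely reason (an inference, not spelled out here): the AUX and
* GMBUS clock dividers are derived from cdclk, so a transfer in flight
* across the change could end up clocked wrongly. Hence the GMBUS
* mutex is taken first and every DP AUX hw_mutex is nested under it
* via mutex_lock_nest_lock(), as the code below shows.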
*/ - mutex_lock(&dev_priv->display.gmbus.mutex); - for_each_intel_dp(&dev_priv->drm, encoder) { + mutex_lock(&display->gmbus.mutex); + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, - &dev_priv->display.gmbus.mutex); + &display->gmbus.mutex); } - intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); + intel_cdclk_set_cdclk(display, cdclk_config, pipe); - for_each_intel_dp(&dev_priv->drm, encoder) { + for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_unlock(&intel_dp->aux.hw_mutex); } - mutex_unlock(&dev_priv->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_resume(intel_dp); @@ -2498,17 +2569,17 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_audio_cdclk_change_post(dev_priv); - if (drm_WARN(&dev_priv->drm, - intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config), + if (drm_WARN(display->drm, + intel_cdclk_changed(&display->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]"); - intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); + intel_cdclk_dump_config(display, &display->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(display, cdclk_config, "[sw state]"); } } static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2547,13 +2618,13 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); - intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, change_cdclk, update_pipe_count); } static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_cdclk_state *old_cdclk_state = @@ -2584,7 +2655,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); - intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, + intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk, update_cdclk, update_pipe_count); } @@ -2609,7 +2680,8 @@ bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state) void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2646,9 +2718,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) */ cdclk_config.joined_mbus = 
old_cdclk_state->actual.joined_mbus; - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(display->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &cdclk_config, pipe, + intel_set_cdclk(display, &cdclk_config, pipe, "Pre changing CDCLK to"); } @@ -2662,7 +2734,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = @@ -2682,20 +2755,21 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) else pipe = INVALID_PIPE; - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + drm_WARN_ON(display->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &new_cdclk_state->actual, pipe, + intel_set_cdclk(display, &new_cdclk_state->actual, pipe, "Post changing CDCLK to"); } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *dev_priv = to_i915(display->drm); int pixel_rate = crtc_state->pixel_rate; - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) return DIV_ROUND_UP(pixel_rate, 2); - else if (DISPLAY_VER(dev_priv) == 9 || + else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; else if (IS_CHERRYVIEW(dev_priv)) @@ -2709,11 +2783,11 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_plane *plane; int min_cdclk = 0; - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + for_each_intel_plane_on_crtc(display->drm, crtc, plane) min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); return min_cdclk; @@ -2722,7 +2796,7 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); int min_cdclk = 0; @@ -2751,7 +2825,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) * Since PPC = 2 with bigjoiner * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits */ - int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; + int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 
36 : 24; int min_cdclk_bj = (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) * pixel_clock) / (2 * bigjoiner_interface_bits); @@ -2764,8 +2838,8 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *dev_priv = to_i915(display->drm); int min_cdclk; if (!crtc_state->hw.enable) @@ -2786,10 +2860,10 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) crtc_state->has_audio && crtc_state->port_clock >= 540000 && crtc_state->lane_count == 4) { - if (DISPLAY_VER(dev_priv) == 10) { + if (DISPLAY_VER(display) == 10) { /* Display WA #1145: glk */ min_cdclk = max(316800, min_cdclk); - } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) { + } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) { /* Display WA #1144: skl,bxt */ min_cdclk = max(432000, min_cdclk); } @@ -2799,7 +2873,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. */ - if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9) + if (crtc_state->has_audio && DISPLAY_VER(display) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* @@ -2841,7 +2915,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) static int intel_compute_min_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_bw_state *bw_state; @@ -2884,7 +2959,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) min_cdclk = max(cdclk_state->force_min_cdclk, cdclk_state->bw_min_cdclk); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); /* @@ -2899,10 +2974,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) !is_power_of_2(cdclk_state->active_pipes)) min_cdclk = max(2 * 96000, min_cdclk); - if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { - drm_dbg_kms(&dev_priv->drm, + if (min_cdclk > display->cdclk.max_cdclk_freq) { + drm_dbg_kms(display->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", - min_cdclk, dev_priv->display.cdclk.max_cdclk_freq); + min_cdclk, display->cdclk.max_cdclk_freq); return -EINVAL; } @@ -2924,7 +2999,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) */ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); struct intel_crtc *crtc; @@ -2952,7 +3027,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) } min_voltage_level = 0; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) min_voltage_level = max(cdclk_state->min_voltage_level[pipe], min_voltage_level); @@ -2961,7 +3036,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) static int 
vlv_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); int min_cdclk, cdclk; @@ -2970,18 +3045,18 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); + cdclk = vlv_calc_cdclk(display, min_cdclk); cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = - vlv_calc_voltage_level(dev_priv, cdclk); + vlv_calc_voltage_level(display, cdclk); if (!cdclk_state->active_pipes) { - cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); + cdclk = vlv_calc_cdclk(display, cdclk_state->force_min_cdclk); cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = - vlv_calc_voltage_level(dev_priv, cdclk); + vlv_calc_voltage_level(display, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } @@ -3020,7 +3095,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) static int skl_dpll0_vco(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); struct intel_crtc *crtc; @@ -3029,7 +3104,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state) vco = cdclk_state->logical.vco; if (!vco) - vco = dev_priv->display.cdclk.skl_preferred_vco_freq; + vco = display->cdclk.skl_preferred_vco_freq; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.enable) @@ -3091,7 +3166,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); int min_cdclk, min_voltage_level, cdclk, vco; @@ -3104,23 +3179,23 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_voltage_level < 0) return min_voltage_level; - cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(display, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(display, cdclk); cdclk_state->logical.vco = vco; cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = max_t(int, min_voltage_level, - intel_cdclk_calc_voltage_level(dev_priv, cdclk)); + intel_cdclk_calc_voltage_level(display, cdclk)); if (!cdclk_state->active_pipes) { - cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(display, cdclk_state->force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(display, cdclk); cdclk_state->actual.vco = vco; cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = - intel_cdclk_calc_voltage_level(dev_priv, cdclk); + intel_cdclk_calc_voltage_level(display, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } @@ -3172,10 +3247,10 @@ static const struct intel_global_state_funcs intel_cdclk_funcs = { struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct 
intel_global_state *cdclk_state; - cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj); + cdclk_state = intel_atomic_get_global_obj_state(state, &display->cdclk.obj); if (IS_ERR(cdclk_state)) return ERR_CAST(cdclk_state); @@ -3231,24 +3306,26 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi return intel_atomic_lock_global_state(&cdclk_state->base); } -int intel_cdclk_init(struct drm_i915_private *dev_priv) +int intel_cdclk_init(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state; cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); if (!cdclk_state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj, + intel_atomic_global_obj_init(dev_priv, &display->cdclk.obj, &cdclk_state->base, &intel_cdclk_funcs); return 0; } -static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, +static bool intel_cdclk_need_serialize(struct intel_display *display, const struct intel_cdclk_state *old_cdclk_state, const struct intel_cdclk_state *new_cdclk_state) { + struct drm_i915_private *i915 = to_i915(display->drm); bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) != hweight8(new_cdclk_state->active_pipes); bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual, @@ -3262,7 +3339,7 @@ static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_cdclk_state *old_cdclk_state; struct intel_cdclk_state *new_cdclk_state; enum pipe pipe = INVALID_PIPE; @@ -3281,7 +3358,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; - if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) { + if (intel_cdclk_need_serialize(display, old_cdclk_state, new_cdclk_state)) { /* * Also serialize commits across all crtcs * if the actual hw needs to be poked. 
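The recurring idiom in these hunks: display helpers now take struct intel_display, and the legacy struct drm_i915_private is derived from display->drm only where platform checks such as IS_CHERRYVIEW() still need it. A minimal sketch of the pattern, assuming a hypothetical helper name (the accessors and fields are the ones used in the surrounding hunks; this is illustration, not code from the patch):

static int example_max_cdclk(struct intel_atomic_state *state)
{
	/* resolve the display instance straight from the atomic state */
	struct intel_display *display = to_intel_display(state);
	/* legacy pointer, kept only for the remaining IS_*() platform macros */
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (DISPLAY_VER(display) >= 11)
		return display->cdclk.max_cdclk_freq;

	if (IS_CHERRYVIEW(i915))
		return 320000; /* kHz, as set by intel_update_max_cdclk() below */

	return display->cdclk.hw.cdclk;
}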
@@ -3301,14 +3378,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) } if (is_power_of_2(new_cdclk_state->active_pipes) && - intel_cdclk_can_cd2x_update(dev_priv, + intel_cdclk_can_cd2x_update(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) @@ -3318,25 +3395,25 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) pipe = INVALID_PIPE; } - if (intel_cdclk_can_crawl_and_squash(dev_priv, + if (intel_cdclk_can_crawl_and_squash(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via crawling and squashing\n"); - } else if (intel_cdclk_can_squash(dev_priv, + } else if (intel_cdclk_can_squash(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via squashing\n"); - } else if (intel_cdclk_can_crawl(dev_priv, + } else if (intel_cdclk_can_crawl(display, &old_cdclk_state->actual, &new_cdclk_state->actual)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk via crawling\n"); } else if (pipe != INVALID_PIPE) { new_cdclk_state->pipe = pipe; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Can change cdclk cd2x divider with pipe %c active\n", pipe_name(pipe)); } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual, @@ -3348,24 +3425,24 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) new_cdclk_state->disable_pipes = true; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Modeset required for cdclk change\n"); } - if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) != - intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) { - int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual); + if (intel_mdclk_cdclk_ratio(display, &old_cdclk_state->actual) != + intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual)) { + int ratio = intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual); ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio); if (ret) return ret; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "New cdclk calculated to be logical %u kHz, actual %u kHz\n", new_cdclk_state->logical.cdclk, new_cdclk_state->actual.cdclk); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "New voltage level calculated to be logical %u, actual %u\n", new_cdclk_state->logical.voltage_level, new_cdclk_state->actual.voltage_level); @@ -3373,18 +3450,19 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } -static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) +static int intel_compute_max_dotclk(struct intel_display *display) { - int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq; + struct drm_i915_private *dev_priv = to_i915(display->drm); + int max_cdclk_freq = display->cdclk.max_cdclk_freq; - if (DISPLAY_VER(dev_priv) >= 10) + if (DISPLAY_VER(display) >= 10) return 2 * max_cdclk_freq; - else if (DISPLAY_VER(dev_priv) == 9 || + else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) return max_cdclk_freq*95/100; - else if (DISPLAY_VER(dev_priv) < 4) + else if 
(DISPLAY_VER(display) < 4) return 2*max_cdclk_freq*90/100; else return max_cdclk_freq*90/100; @@ -3392,34 +3470,38 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) /** * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the maximum CDCLK frequency the platform supports, and also * derive the maximum dot clock frequency the maximum CDCLK frequency * allows. */ -void intel_update_max_cdclk(struct drm_i915_private *dev_priv) +void intel_update_max_cdclk(struct intel_display *display) { - if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - if (dev_priv->display.cdclk.hw.ref == 24000) - dev_priv->display.cdclk.max_cdclk_freq = 552000; + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 30) { + display->cdclk.max_cdclk_freq = 691200; + } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { + if (display->cdclk.hw.ref == 24000) + display->cdclk.max_cdclk_freq = 552000; else - dev_priv->display.cdclk.max_cdclk_freq = 556800; - } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->display.cdclk.hw.ref == 24000) - dev_priv->display.cdclk.max_cdclk_freq = 648000; + display->cdclk.max_cdclk_freq = 556800; + } else if (DISPLAY_VER(display) >= 11) { + if (display->cdclk.hw.ref == 24000) + display->cdclk.max_cdclk_freq = 648000; else - dev_priv->display.cdclk.max_cdclk_freq = 652800; + display->cdclk.max_cdclk_freq = 652800; } else if (IS_GEMINILAKE(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 316800; + display->cdclk.max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 624000; - } else if (DISPLAY_VER(dev_priv) == 9) { - u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + display->cdclk.max_cdclk_freq = 624000; + } else if (DISPLAY_VER(display) == 9) { + u32 limit = intel_de_read(display, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; - vco = dev_priv->display.cdclk.skl_preferred_vco_freq; - drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); + vco = display->cdclk.skl_preferred_vco_freq; + drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000); /* * Use the lower (vco 8640) cdclk values as a @@ -3435,7 +3517,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) else max_cdclk = 308571; - dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); + display->cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow * 540 MHz for ULX and 675 MHz for ULT. * How can we know if extra cooling is * available? PCI ID, VTB, something else? 
*/ - if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) - dev_priv->display.cdclk.max_cdclk_freq = 450000; + if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT) + display->cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULX(dev_priv)) - dev_priv->display.cdclk.max_cdclk_freq = 450000; + display->cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULT(dev_priv)) - dev_priv->display.cdclk.max_cdclk_freq = 540000; + display->cdclk.max_cdclk_freq = 540000; else - dev_priv->display.cdclk.max_cdclk_freq = 675000; + display->cdclk.max_cdclk_freq = 675000; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 320000; + display->cdclk.max_cdclk_freq = 320000; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.cdclk.max_cdclk_freq = 400000; + display->cdclk.max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ - dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; + display->cdclk.max_cdclk_freq = display->cdclk.hw.cdclk; } - dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv); + display->cdclk.max_dotclk_freq = intel_compute_max_dotclk(display); - drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", - dev_priv->display.cdclk.max_cdclk_freq); + drm_dbg(display->drm, "Max CD clock rate: %d kHz\n", + display->cdclk.max_cdclk_freq); - drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", - dev_priv->display.cdclk.max_dotclk_freq); + drm_dbg(display->drm, "Max dotclock rate: %d kHz\n", + display->cdclk.max_dotclk_freq); } /** * intel_update_cdclk - Determine the current CDCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the current CDCLK frequency. */ -void intel_update_cdclk(struct drm_i915_private *dev_priv) +void intel_update_cdclk(struct intel_display *display) { - intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw); + struct drm_i915_private *dev_priv = to_i915(display->drm); + + intel_cdclk_get_cdclk(display, &display->cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): @@ -3486,28 +3570,29 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) * generate GMBus clock. This will vary with the cdclk freq. */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_de_write(dev_priv, GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000)); + intel_de_write(display, GMBUSFREQ_VLV, + DIV_ROUND_UP(display->cdclk.hw.cdclk, 1000)); } -static int dg1_rawclk(struct drm_i915_private *dev_priv) +static int dg1_rawclk(struct intel_display *display) { /* * DG1 always uses a 38.4 MHz rawclk. The bspec tells us * "Program Numerator=2, Denominator=4, Divider=37 decimal." 
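* (Hence the helper below just programs PCH_RAWCLK_FREQ accordingly and returns a fixed 38400, i.e. 38.4 MHz in the kHz units these helpers use.)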
*/ - intel_de_write(dev_priv, PCH_RAWCLK_FREQ, + intel_de_write(display, PCH_RAWCLK_FREQ, CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); return 38400; } -static int cnp_rawclk(struct drm_i915_private *dev_priv) +static int cnp_rawclk(struct intel_display *display) { - u32 rawclk; + struct drm_i915_private *dev_priv = to_i915(display->drm); int divider, fraction; + u32 rawclk; - if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { + if (intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { /* 24 MHz */ divider = 24000; fraction = 0; @@ -3527,37 +3612,42 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) rawclk |= ICP_RAWCLK_NUM(numerator); } - intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); + intel_de_write(display, PCH_RAWCLK_FREQ, rawclk); return divider + fraction; } -static int pch_rawclk(struct drm_i915_private *dev_priv) +static int pch_rawclk(struct intel_display *display) { - return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; + return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } -static int vlv_hrawclk(struct drm_i915_private *dev_priv) +static int vlv_hrawclk(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } -static int i9xx_hrawclk(struct drm_i915_private *i915) +static int i9xx_hrawclk(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + /* hrawclock is 1/4 the FSB frequency */ return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4); } /** * intel_read_rawclk - Determine the current RAWCLK frequency - * @dev_priv: i915 device + * @display: display instance * * Determine the current RAWCLK frequency. RAWCLK is a fixed * frequency clock so this needs to be done only once. 
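* * For example, on CNP-based PCHs the value depends on the SFUSE_STRAP_RAW_FREQUENCY fuse (see cnp_rawclk() above); all of these helpers report the frequency in kHz.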
*/ -u32 intel_read_rawclk(struct drm_i915_private *dev_priv) +u32 intel_read_rawclk(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 freq; if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL) @@ -3568,15 +3658,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) */ freq = 38400; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) - freq = dg1_rawclk(dev_priv); + freq = dg1_rawclk(display); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - freq = cnp_rawclk(dev_priv); + freq = cnp_rawclk(display); else if (HAS_PCH_SPLIT(dev_priv)) - freq = pch_rawclk(dev_priv); + freq = pch_rawclk(display); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - freq = vlv_hrawclk(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 3) - freq = i9xx_hrawclk(dev_priv); + freq = vlv_hrawclk(display); + else if (DISPLAY_VER(display) >= 3) + freq = i9xx_hrawclk(display); else /* no rawclk on other platforms, or no need to know it */ return 0; @@ -3586,25 +3676,32 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) static int i915_cdclk_info_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; + struct intel_display *display = m->private; - seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); - seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); - seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq); + seq_printf(m, "Current CD clock frequency: %d kHz\n", display->cdclk.hw.cdclk); + seq_printf(m, "Max CD clock frequency: %d kHz\n", display->cdclk.max_cdclk_freq); + seq_printf(m, "Max pixel clock frequency: %d kHz\n", display->cdclk.max_dotclk_freq); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info); -void intel_cdclk_debugfs_register(struct drm_i915_private *i915) +void intel_cdclk_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root, - i915, &i915_cdclk_info_fops); + display, &i915_cdclk_info_fops); } +static const struct intel_cdclk_funcs xe3lpd_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = xe3lpd_calc_voltage_level, +}; + static const struct intel_cdclk_funcs rplu_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, @@ -3743,97 +3840,102 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { /** * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks - * @dev_priv: i915 device + * @display: display instance */ -void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 20) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = xe2lpd_cdclk_table; - } else if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1)) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = xe2hpd_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 14) { - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; - dev_priv->display.cdclk.table = mtl_cdclk_table; +void intel_init_cdclk_hooks(struct intel_display *display) +{ + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 30) { + display->funcs.cdclk = &xe3lpd_cdclk_funcs; + display->cdclk.table = xe3lpd_cdclk_table; + } else if (DISPLAY_VER(display) >= 20) { + 
display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = xe2lpd_cdclk_table; + } else if (DISPLAY_VERx100(display) >= 1401) { + display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = xe2hpd_cdclk_table; + } else if (DISPLAY_VER(display) >= 14) { + display->funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = mtl_cdclk_table; } else if (IS_DG2(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = dg2_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { /* Wa_22011320316:adl-p[a0] */ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { - dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = adlp_a_step_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; } else if (IS_RAPTORLAKE_U(dev_priv)) { - dev_priv->display.cdclk.table = rplu_cdclk_table; - dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; + display->cdclk.table = rplu_cdclk_table; + display->funcs.cdclk = &rplu_cdclk_funcs; } else { - dev_priv->display.cdclk.table = adlp_cdclk_table; - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = adlp_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; } } else if (IS_ROCKETLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = rkl_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 12) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = rkl_cdclk_table; + } else if (DISPLAY_VER(display) >= 12) { + display->funcs.cdclk = &tgl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; - } else if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->display.funcs.cdclk = &icl_cdclk_funcs; - dev_priv->display.cdclk.table = icl_cdclk_table; + display->funcs.cdclk = &ehl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; + } else if (DISPLAY_VER(display) >= 11) { + display->funcs.cdclk = &icl_cdclk_funcs; + display->cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs; + display->funcs.cdclk = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) - dev_priv->display.cdclk.table = glk_cdclk_table; + display->cdclk.table = glk_cdclk_table; else - dev_priv->display.cdclk.table = bxt_cdclk_table; - } else if (DISPLAY_VER(dev_priv) == 9) { - dev_priv->display.funcs.cdclk = &skl_cdclk_funcs; + display->cdclk.table = bxt_cdclk_table; + } else if (DISPLAY_VER(display) == 9) { + display->funcs.cdclk = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { - dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs; + display->funcs.cdclk = &bdw_cdclk_funcs; } else if (IS_HASWELL(dev_priv)) { - dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs; + display->funcs.cdclk = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &chv_cdclk_funcs; + display->funcs.cdclk = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs; + display->funcs.cdclk = &vlv_cdclk_funcs; } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - 
dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_IRONLAKE(dev_priv)) { - dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs; + display->funcs.cdclk = &ilk_cdclk_funcs; } else if (IS_GM45(dev_priv)) { - dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs; + display->funcs.cdclk = &gm45_cdclk_funcs; } else if (IS_G45(dev_priv)) { - dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; + display->funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I965GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs; + display->funcs.cdclk = &i965gm_cdclk_funcs; } else if (IS_I965G(dev_priv)) { - dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_PINEVIEW(dev_priv)) { - dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs; + display->funcs.cdclk = &pnv_cdclk_funcs; } else if (IS_G33(dev_priv)) { - dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; + display->funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I945GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs; + display->funcs.cdclk = &i945gm_cdclk_funcs; } else if (IS_I945G(dev_priv)) { - dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; + display->funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_I915GM(dev_priv)) { - dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs; + display->funcs.cdclk = &i915gm_cdclk_funcs; } else if (IS_I915G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs; + display->funcs.cdclk = &i915g_cdclk_funcs; } else if (IS_I865G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs; + display->funcs.cdclk = &i865g_cdclk_funcs; } else if (IS_I85X(dev_priv)) { - dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs; + display->funcs.cdclk = &i85x_cdclk_funcs; } else if (IS_I845G(dev_priv)) { - dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs; + display->funcs.cdclk = &i845g_cdclk_funcs; } else if (IS_I830(dev_priv)) { - dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; + display->funcs.cdclk = &i830_cdclk_funcs; } - if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk, + if (drm_WARN(display->drm, !display->funcs.cdclk, "Unknown platform. 
Assuming i830\n")) - dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; + display->funcs.cdclk = &i830_cdclk_funcs; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index cfdcdec07a4d..6b0e7a41eba3 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -11,9 +11,9 @@ #include "intel_display_limits.h" #include "intel_global_state.h" -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; +struct intel_display; struct intel_cdclk_config { unsigned int cdclk, vco, ref, bypass; @@ -59,24 +59,24 @@ struct intel_cdclk_state { }; int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); -void intel_cdclk_init_hw(struct drm_i915_private *i915); -void intel_cdclk_uninit_hw(struct drm_i915_private *i915); -void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); -void intel_update_max_cdclk(struct drm_i915_private *dev_priv); -void intel_update_cdclk(struct drm_i915_private *dev_priv); -u32 intel_read_rawclk(struct drm_i915_private *dev_priv); +void intel_cdclk_init_hw(struct intel_display *display); +void intel_cdclk_uninit_hw(struct intel_display *display); +void intel_init_cdclk_hooks(struct intel_display *display); +void intel_update_max_cdclk(struct intel_display *display); +void intel_update_cdclk(struct intel_display *display); +u32 intel_read_rawclk(struct intel_display *display); bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b); -int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, +int intel_mdclk_cdclk_ratio(struct intel_display *display, const struct intel_cdclk_config *cdclk_config); bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state); void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); -void intel_cdclk_dump_config(struct drm_i915_private *i915, +void intel_cdclk_dump_config(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); -void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, +void intel_cdclk_get_cdclk(struct intel_display *display, struct intel_cdclk_config *cdclk_config); int intel_cdclk_atomic_check(struct intel_atomic_state *state, bool *need_cdclk_calc); @@ -88,11 +88,11 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state); container_of_const((global_state), struct intel_cdclk_state, base) #define intel_atomic_get_old_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->cdclk.obj)) #define intel_atomic_get_new_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->cdclk.obj)) -int intel_cdclk_init(struct drm_i915_private *dev_priv); -void intel_cdclk_debugfs_register(struct drm_i915_private *i915); +int intel_cdclk_init(struct intel_display *display); +void intel_cdclk_debugfs_register(struct intel_display *display); #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 
5d701f48351b..174753625bca 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -39,7 +39,8 @@ struct intel_color_funcs { * the next vblank start, alongside any other double buffered * registers involved with the same commit. This hook is optional. */ - void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state); + void (*color_commit_noarm)(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* * Program arming double buffered color management registers * during vblank evasion. The registers (and whatever other registers @@ -47,7 +48,8 @@ struct intel_color_funcs { * during the next vblank start, alongside any other double buffered * registers involved with the same commit. */ - void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); + void (*color_commit_arm)(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* * Perform any extra tasks needed after all the * double buffered registers have been latched. @@ -205,74 +207,81 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) return result; } -static void ilk_update_pipe_csc(struct intel_crtc *crtc, +static void ilk_update_pipe_csc(struct intel_dsb *dsb, + struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe), - csc->coeff[0] << 16 | csc->coeff[1]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), - csc->coeff[2] << 16); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe), - csc->coeff[3] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), - csc->coeff[5] << 16); - - intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe), - csc->coeff[6] << 16 | csc->coeff[7]); - intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), - csc->coeff[8] << 16); - - if (DISPLAY_VER(i915) < 7) + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_HI(pipe), + csc->preoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_ME(pipe), + csc->preoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_LO(pipe), + csc->preoff[2]); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RY_GY(pipe), + csc->coeff[0] << 16 | csc->coeff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BY(pipe), + csc->coeff[2] << 16); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RU_GU(pipe), + csc->coeff[3] << 16 | csc->coeff[4]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BU(pipe), + csc->coeff[5] << 16); + + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RV_GV(pipe), + csc->coeff[6] << 16 | csc->coeff[7]); + intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BV(pipe), + csc->coeff[8] << 16); + + if (DISPLAY_VER(display) < 7) return; - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_HI(pipe), + csc->postoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_ME(pipe), + csc->postoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_LO(pipe), + csc->postoff[2]); } static void 
ilk_read_pipe_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe)); - csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe)); - csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe)); + csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_PREOFF_LO(pipe)); - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; - if (DISPLAY_VER(i915) < 7) + if (DISPLAY_VER(display) < 7) return; - csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe)); - csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe)); - csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe)); + csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_LO(pipe)); } static void ilk_read_csc(struct intel_crtc_state *crtc_state) @@ -304,68 +313,75 @@ static void skl_read_csc(struct intel_crtc_state *crtc_state) ilk_read_pipe_csc(crtc, &crtc_state->csc); } -static void icl_update_output_csc(struct intel_crtc *crtc, +static void icl_update_output_csc(struct intel_dsb *dsb, + struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), + csc->preoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), + csc->preoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), + csc->preoff[2]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), - csc->coeff[0] << 16 | csc->coeff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe), - csc->coeff[2] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), + csc->coeff[0] << 16 | csc->coeff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BY(pipe), + csc->coeff[2] << 16); - 
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), - csc->coeff[3] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe), - csc->coeff[5] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), + csc->coeff[3] << 16 | csc->coeff[4]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BU(pipe), + csc->coeff[5] << 16); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), - csc->coeff[6] << 16 | csc->coeff[7]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe), - csc->coeff[8] << 16); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), + csc->coeff[6] << 16 | csc->coeff[7]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BV(pipe), + csc->coeff[8] << 16); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]); - intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), + csc->postoff[0]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), + csc->postoff[1]); + intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), + csc->postoff[2]); } static void icl_read_output_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); - csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); - csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); + csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); + csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); + csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; - tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); + tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; - csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); - csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); - csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); + csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); + csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); + csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); } static void icl_read_csc(struct 
intel_crtc_state *crtc_state) @@ -386,14 +402,15 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state) static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(display->drm); /* icl+ have dedicated output CSC */ - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) return false; /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ - if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) + if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915)) return false; return crtc_state->limited_color_range; @@ -401,7 +418,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (!ilk_limited_range(crtc_state)) return false; @@ -409,7 +426,7 @@ static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state) if (crtc_state->c8_planes) return false; - if (DISPLAY_VER(i915) == 10) + if (DISPLAY_VER(display) == 10) return crtc_state->hw.gamma_lut; else return crtc_state->hw.gamma_lut && @@ -424,13 +441,13 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) return !ilk_lut_limited_range(crtc_state); } -static void ilk_csc_copy(struct drm_i915_private *i915, +static void ilk_csc_copy(struct intel_display *display, struct intel_csc_matrix *dst, const struct intel_csc_matrix *src) { *dst = *src; - if (DISPLAY_VER(i915) < 7) + if (DISPLAY_VER(display) < 7) memset(dst->postoff, 0, sizeof(dst->postoff)); } @@ -438,7 +455,7 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, struct intel_csc_matrix *csc, bool limited_color_range) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; const u64 *input; u64 temp[9]; @@ -446,9 +463,9 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, /* for preoff/postoff */ if (limited_color_range) - ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, csc, &ilk_csc_matrix_limited_range); else - ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity); + ilk_csc_copy(display, csc, &ilk_csc_matrix_identity); if (limited_color_range) input = ctm_mult_by_limited(temp, ctm->matrix); @@ -496,21 +513,22 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(display->drm); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range); } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); } 
else if (limited_color_range) { - drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); + drm_WARN_ON(display->drm, !crtc_state->csc_enable); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_limited_range); } else if (crtc_state->csc_enable) { /* * On GLK both pipe CSC and degamma LUT are controlled @@ -518,60 +536,62 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state) * LUT is needed but CSC is not we need to load an * identity matrix. */ - drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915)); + drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915)); - ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity); + ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity); } else { intel_csc_clear(&crtc_state->csc); } } -static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void ilk_load_csc_matrix(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_enable) - ilk_update_pipe_csc(crtc, &crtc_state->csc); + ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc); } static void icl_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false); } else { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->csc); } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); - ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); + ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (crtc_state->limited_color_range) { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); - ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); + ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); } else { - drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); + drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->output_csc); } } -static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void icl_load_csc_matrix(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_mode & ICL_CSC_ENABLE) - ilk_update_pipe_csc(crtc, &crtc_state->csc); + ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc); if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) - icl_update_output_csc(crtc, &crtc_state->output_csc); + icl_update_output_csc(dsb, crtc, &crtc_state->output_csc); } static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits) @@ -614,51 +634,51 @@ static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void vlv_load_wgc_csc(struct 
intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; - intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C01_C00(display, pipe), csc->coeff[1] << 16 | csc->coeff[0]); - intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C02(display, pipe), csc->coeff[2]); - intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C11_C10(display, pipe), csc->coeff[4] << 16 | csc->coeff[3]); - intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C12(display, pipe), csc->coeff[5]); - intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C21_C20(display, pipe), csc->coeff[7] << 16 | csc->coeff[6]); - intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe), + intel_de_write_fw(display, PIPE_WGC_C22(display, pipe), csc->coeff[8]); } static void vlv_read_wgc_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C01_C00(display, pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C02(display, pipe)); csc->coeff[2] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C11_C10(display, pipe)); csc->coeff[3] = tmp & 0xffff; csc->coeff[4] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C12(display, pipe)); csc->coeff[5] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C21_C20(display, pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe)); + tmp = intel_de_read_fw(display, PIPE_WGC_C22(display, pipe)); csc->coeff[8] = tmp & 0xffff; } @@ -672,14 +692,14 @@ static void vlv_read_csc(struct intel_crtc_state *crtc_state) static void vlv_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, !crtc_state->wgc_enable); + drm_WARN_ON(display->drm, !crtc_state->wgc_enable); vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { - drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + drm_WARN_ON(display->drm, crtc_state->wgc_enable); intel_csc_clear(&crtc_state->csc); } @@ -716,45 +736,45 @@ static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = { static void chv_load_cgm_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF01(pipe), csc->coeff[1] << 16 | csc->coeff[0]); - intel_de_write_fw(i915, 
CGM_PIPE_CSC_COEFF23(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF23(pipe), csc->coeff[3] << 16 | csc->coeff[2]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF45(pipe), csc->coeff[5] << 16 | csc->coeff[4]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF67(pipe), csc->coeff[7] << 16 | csc->coeff[6]); - intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe), + intel_de_write_fw(display, CGM_PIPE_CSC_COEFF8(pipe), csc->coeff[8]); } static void chv_read_cgm_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; u32 tmp; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF01(pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF23(pipe)); csc->coeff[2] = tmp & 0xffff; csc->coeff[3] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF45(pipe)); csc->coeff[4] = tmp & 0xffff; csc->coeff[5] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF67(pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; - tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe)); + tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF8(pipe)); csc->coeff[8] = tmp & 0xffff; } @@ -768,16 +788,16 @@ static void chv_read_csc(struct intel_crtc_state *crtc_state) static void chv_assign_csc(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); + drm_WARN_ON(display->drm, crtc_state->wgc_enable); if (crtc_state->hw.ctm) { - drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { - drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); + drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); crtc_state->csc = chv_cgm_csc_matrix_identity; } @@ -953,7 +973,8 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_LDW_MASK, ldw); } -static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void icl_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* * Despite Wa_1406463849, ICL no longer suffers from the SKL @@ -963,10 +984,11 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) * * On TGL+ all CSC arming issues have been properly fixed. */ - icl_load_csc_matrix(crtc_state); + icl_load_csc_matrix(dsb, crtc_state); } -static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void skl_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* * Possibly related to display WA #1184, SKL CSC loses the latched @@ -979,72 +1001,76 @@ static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) * which is called after PSR exit. 
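* (With PSR enabled the matrix is therefore loaded from the arming hook instead; see skl_color_commit_arm() below.)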
*/ if (!crtc_state->has_psr) - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); } -static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) +static void ilk_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); } -static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void i9xx_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { /* update TRANSCONF GAMMA_MODE */ i9xx_set_pipeconf(crtc_state); } -static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void ilk_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); /* update TRANSCONF GAMMA_MODE */ ilk_set_pipeconf(crtc_state); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), + intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } -static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void hsw_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), + intel_de_write(display, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), + intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static u32 hsw_read_gamma_mode(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - return intel_de_read(i915, GAMMA_MODE(crtc->pipe)); + return intel_de_read(display, GAMMA_MODE(crtc->pipe)); } static u32 ilk_read_csc_mode(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - return intel_de_read(i915, PIPE_CSC_MODE(crtc->pipe)); + return intel_de_read(display, PIPE_CSC_MODE(crtc->pipe)); } static void i9xx_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 tmp; - tmp = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane)); + tmp = intel_de_read(display, DSPCNTR(display, i9xx_plane)); if (tmp & DISP_PIPE_GAMMA_ENABLE) crtc_state->gamma_enable = true; - if (!HAS_GMCH(dev_priv) && tmp & DISP_PIPE_CSC_ENABLE) + if (!HAS_GMCH(display) && tmp & DISP_PIPE_CSC_ENABLE) crtc_state->csc_enable = true; } @@ -1060,14 +1086,14 @@ static void hsw_get_config(struct intel_crtc_state *crtc_state) static void skl_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 tmp; crtc_state->gamma_mode = hsw_read_gamma_mode(crtc); crtc_state->csc_mode = ilk_read_csc_mode(crtc); - tmp = 
intel_de_read(i915, SKL_BOTTOM_COLOR(crtc->pipe)); + tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe)); if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) crtc_state->gamma_enable = true; @@ -1076,15 +1102,16 @@ static void skl_get_config(struct intel_crtc_state *crtc_state) crtc_state->csc_enable = true; } -static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void skl_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val = 0; if (crtc_state->has_psr) - ilk_load_csc_matrix(crtc_state); + ilk_load_csc_matrix(dsb, crtc_state); /* * We don't (yet) allow userspace to control the pipe background color, @@ -1095,38 +1122,35 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; if (crtc_state->csc_enable) val |= SKL_BOTTOM_COLOR_CSC_ENABLE; - intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), val); + intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), val); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), - crtc_state->gamma_mode); + intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } -static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state) +static void icl_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * We don't (yet) allow userspace to control the pipe background color, * so force it to black. */ - intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0); + intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), 0); - intel_de_write(i915, GAMMA_MODE(crtc->pipe), - crtc_state->gamma_mode); + intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); - intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void icl_color_post_update(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * Despite Wa_1406463849, ICL CSC is no longer disarmed by @@ -1142,17 +1166,17 @@ static void icl_color_post_update(const struct intel_crtc_state *crtc_state) * * TGL+ no longer need this workaround. 
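* (The dummy PIPE_CSC_PREOFF_HI read below is the disarming register access in question.)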
*/ - intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe)); + intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(crtc->pipe)); } static struct drm_property_blob * -create_linear_lut(struct drm_i915_private *i915, int lut_size) +create_linear_lut(struct intel_display *display, int lut_size) { struct drm_property_blob *blob; struct drm_color_lut *lut; int i; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -1180,7 +1204,7 @@ static u16 lut_limited_range(unsigned int value) } static struct drm_property_blob * -create_resized_lut(struct drm_i915_private *i915, +create_resized_lut(struct intel_display *display, const struct drm_property_blob *blob_in, int lut_out_size, bool limited_color_range) { @@ -1189,7 +1213,7 @@ create_resized_lut(struct drm_i915_private *i915, const struct drm_color_lut *lut_in; struct drm_color_lut *lut_out; - blob_out = drm_property_create_blob(&i915->drm, + blob_out = drm_property_create_blob(display->drm, sizeof(lut_out[0]) * lut_out_size, NULL); if (IS_ERR(blob_out)) @@ -1217,7 +1241,7 @@ create_resized_lut(struct drm_i915_private *i915, static void i9xx_load_lut_8(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut; enum pipe pipe = crtc->pipe; int i; @@ -1228,24 +1252,24 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i), + intel_de_write_fw(display, PALETTE(display, pipe, i), i9xx_lut_8(&lut[i])); } static void i9xx_load_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 0), i9xx_lut_10_ldw(&lut[i])); - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 1), i9xx_lut_10_udw(&lut[i])); } } @@ -1271,23 +1295,23 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) static void i965_load_lut_10p6(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 0), i965_lut_10p6_ldw(&lut[i])); - intel_de_write_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1), + intel_de_write_fw(display, + PALETTE(display, pipe, 2 * i + 1), i965_lut_10p6_udw(&lut[i])); } - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0), lut[i].red); - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1), lut[i].green); - intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2), lut[i].blue); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 0), lut[i].red); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 
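/*
 * create_resized_lut() takes a target size plus a limited_color_range
 * flag, but the hunks elide the arithmetic. A plausible standalone
 * reading, assuming nearest-entry resampling and the usual full-range
 * (0..0xffff) to limited-range (16..235 in the high byte) remap; the
 * exact formulas are assumptions, not copied from the driver.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t lut_limited(unsigned int value)
{
        unsigned int min = 16 << 8, max = 235 << 8;

        return value * (max - min) / 0xffff + min;
}

/* resample one channel of a ramp; out_size must be >= 2 */
static void resize_lut(const uint16_t *in, int in_size,
                       uint16_t *out, int out_size, int limited)
{
        for (int i = 0; i < out_size; i++) {
                uint16_t v = in[i * (in_size - 1) / (out_size - 1)];

                out[i] = limited ? lut_limited(v) : v;
        }
}

int main(void)
{
        uint16_t in[5] = { 0x0000, 0x4000, 0x8000, 0xc000, 0xffff };
        uint16_t out[3];

        resize_lut(in, 5, out, 3, 1);
        for (int i = 0; i < 3; i++)
                printf("0x%04x ", out[i]); /* 0x1000 ... 0xeb00 */
        printf("\n");
        return 0;
}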
1), lut[i].green); + intel_de_write_fw(display, PIPEGCMAX(display, pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) @@ -1311,12 +1335,12 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state) static void ilk_lut_write(const struct intel_crtc_state *crtc_state, i915_reg_t reg, u32 val) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->dsb_color_vblank) intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val); else - intel_de_write_fw(i915, reg, val); + intel_de_write_fw(display, reg, val); } static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, @@ -1523,9 +1547,9 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } } -static int glk_degamma_lut_size(struct drm_i915_private *i915) +static int glk_degamma_lut_size(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return 131; else return 35; @@ -1557,8 +1581,8 @@ static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val) static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; @@ -1589,14 +1613,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, * as compared to just 16 to achieve this. */ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(i915) >= 14 ? + DISPLAY_VER(display) >= 14 ? mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); } /* Clamp values > 1.0. */ - while (i++ < glk_degamma_lut_size(i915)) + while (i++ < glk_degamma_lut_size(display)) ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - DISPLAY_VER(i915) >= 14 ? + DISPLAY_VER(display) >= 14 ? 
1 << 24 : 1 << 16); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); @@ -1797,15 +1821,15 @@ static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 0), + intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0), chv_cgm_degamma_ldw(&lut[i])); - intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 1), + intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1), chv_cgm_degamma_udw(&lut[i])); } } @@ -1831,23 +1855,23 @@ static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static void chv_load_cgm_gamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0), + intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 0), chv_cgm_gamma_ldw(&lut[i])); - intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1), + intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 1), chv_cgm_gamma_udw(&lut[i])); } } static void chv_load_luts(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; @@ -1862,50 +1886,66 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) else i965_load_luts(crtc_state); - intel_de_write_fw(i915, CGM_PIPE_MODE(crtc->pipe), + intel_de_write_fw(display, CGM_PIPE_MODE(crtc->pipe), crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (crtc_state->dsb_color_vblank) return; - i915->display.funcs.color->load_luts(crtc_state); + display->funcs.color->load_luts(crtc_state); } -void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state) +void intel_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (i915->display.funcs.color->color_commit_noarm) - i915->display.funcs.color->color_commit_noarm(crtc_state); + if (display->funcs.color->color_commit_noarm) + display->funcs.color->color_commit_noarm(dsb, crtc_state); } -void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) +void intel_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - i915->display.funcs.color->color_commit_arm(crtc_state); + struct intel_display *display = to_intel_display(crtc_state); - if (crtc_state->dsb_color_commit) - 
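/*
 * The clamp loop above pads the degamma LUT with 1.0 entries, encoded
 * as 1 << 24 on display ver >= 14 and 1 << 16 before that: the data
 * registers hold an extended-range fraction where 1.0 sits one step
 * above the full-scale input, so values > 1.0 remain representable. A
 * standalone sketch of widening a 16-bit component into either format;
 * the round-to-nearest choice is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

/* scale a 16-bit component onto an n-bit full scale */
static uint32_t lut_extract(uint16_t v, unsigned int bits)
{
        uint32_t max = (1u << bits) - 1;

        return (uint32_t)(((uint64_t)v * max + 0x7fff) / 0xffff);
}

int main(void)
{
        /* full-scale input lands just below the 1.0 code point */
        printf("glk: 0x%05x < 0x%05x\n", lut_extract(0xffff, 16), 1u << 16);
        printf("mtl: 0x%07x < 0x%07x\n", lut_extract(0xffff, 24), 1u << 24);
        return 0;
}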
intel_dsb_commit(crtc_state->dsb_color_commit, false); + display->funcs.color->color_commit_arm(dsb, crtc_state); } void intel_color_post_update(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + + if (display->funcs.color->color_post_update) + display->funcs.color->color_post_update(crtc_state); +} + +void intel_color_modeset(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + intel_color_load_luts(crtc_state); + intel_color_commit_noarm(NULL, crtc_state); + intel_color_commit_arm(NULL, crtc_state); - if (i915->display.funcs.color->color_post_update) - i915->display.funcs.color->color_post_update(crtc_state); + if (DISPLAY_VER(display) < 9) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + plane->disable_arm(NULL, plane, crtc_state); + } } void intel_color_prepare_commit(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1923,30 +1963,16 @@ void intel_color_prepare_commit(struct intel_atomic_state *state, if (!crtc_state->dsb_color_vblank) return; - i915->display.funcs.color->load_luts(crtc_state); - - intel_dsb_finish(crtc_state->dsb_color_vblank); + display->funcs.color->load_luts(crtc_state); - crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16); - if (!crtc_state->dsb_color_commit) { - intel_dsb_cleanup(crtc_state->dsb_color_vblank); - crtc_state->dsb_color_vblank = NULL; - return; - } + intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank); + intel_dsb_interrupt(crtc_state->dsb_color_vblank); - intel_dsb_chain(state, crtc_state->dsb_color_commit, - crtc_state->dsb_color_vblank, true); - - intel_dsb_finish(crtc_state->dsb_color_commit); + intel_dsb_finish(crtc_state->dsb_color_vblank); } void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) { - if (crtc_state->dsb_color_commit) { - intel_dsb_cleanup(crtc_state->dsb_color_commit); - crtc_state->dsb_color_commit = NULL; - } - if (crtc_state->dsb_color_vblank) { intel_dsb_cleanup(crtc_state->dsb_color_vblank); crtc_state->dsb_color_vblank = NULL; @@ -1955,8 +1981,6 @@ void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) void intel_color_wait_commit(const struct intel_crtc_state *crtc_state) { - if (crtc_state->dsb_color_commit) - intel_dsb_wait(crtc_state->dsb_color_commit); if (crtc_state->dsb_color_vblank) intel_dsb_wait(crtc_state->dsb_color_vblank); } @@ -2008,7 +2032,7 @@ static bool chv_can_preload_luts(struct intel_atomic_state *state, int intel_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -2024,20 +2048,19 @@ int intel_color_check(struct intel_atomic_state *state, if (!intel_crtc_needs_color_update(new_crtc_state)) return 0; - return i915->display.funcs.color->color_check(state, crtc); + return 
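/*
 * The DSB usage gets simpler here: the old code built a second batch
 * (dsb_color_commit) and chained it into dsb_color_vblank, while the
 * new code emits everything into the one vblank batch (LUT writes,
 * then a wait for the vblank delay, then an interrupt) and seals it
 * with intel_dsb_finish(). A toy command buffer mirroring that
 * emission order; the opcodes are invented.
 */
#include <stdio.h>

enum op { OP_REG_WRITE, OP_WAIT_VBLANK_DELAY, OP_INTERRUPT };

struct batch {
        enum op op[32];
        int len;
        int sealed;
};

static void emit(struct batch *b, enum op op)
{
        if (!b->sealed && b->len < 32)
                b->op[b->len++] = op;
}

int main(void)
{
        struct batch vblank = { .len = 0, .sealed = 0 };

        emit(&vblank, OP_REG_WRITE);         /* load_luts() queues LUT data */
        emit(&vblank, OP_WAIT_VBLANK_DELAY); /* intel_dsb_wait_vblank_delay() */
        emit(&vblank, OP_INTERRUPT);         /* intel_dsb_interrupt() */
        vblank.sealed = 1;                   /* intel_dsb_finish() */

        printf("%d ops, sealed=%d\n", vblank.len, vblank.sealed);
        return 0;
}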
display->funcs.color->color_check(state, crtc); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (i915->display.funcs.color->get_config) - i915->display.funcs.color->get_config(crtc_state); + display->funcs.color->get_config(crtc_state); - i915->display.funcs.color->read_luts(crtc_state); + display->funcs.color->read_luts(crtc_state); - if (i915->display.funcs.color->read_csc) - i915->display.funcs.color->read_csc(crtc_state); + if (display->funcs.color->read_csc) + display->funcs.color->read_csc(crtc_state); } bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, @@ -2045,7 +2068,7 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); /* * FIXME c8_planes readout missing thus @@ -2054,14 +2077,14 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, if (!is_pre_csc_lut && crtc_state->c8_planes) return true; - return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2, - is_pre_csc_lut); + return display->funcs.color->lut_equal(crtc_state, blob1, blob2, + is_pre_csc_lut); } static bool need_plane_update(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane); /* * On pre-SKL the pipe gamma enable and pipe csc enable for @@ -2069,15 +2092,14 @@ static bool need_plane_update(struct intel_plane *plane, * We have to reconfigure that even if the plane is inactive. 
*/ return crtc_state->active_planes & BIT(plane->id) || - (DISPLAY_VER(i915) < 9 && - plane->id == PLANE_PRIMARY); + (DISPLAY_VER(display) < 9 && plane->id == PLANE_PRIMARY); } static int intel_color_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -2092,7 +2114,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, new_crtc_state->csc_enable == old_crtc_state->csc_enable) return 0; - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { struct intel_plane_state *plane_state; if (!need_plane_update(plane, new_crtc_state)) @@ -2107,7 +2129,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, new_crtc_state->do_async_flip = false; /* plane control register changes blocked by CxSR */ - if (HAS_GMCH(i915)) + if (HAS_GMCH(display)) new_crtc_state->disable_cxsr = true; } @@ -2116,43 +2138,44 @@ intel_color_add_affected_planes(struct intel_atomic_state *state, static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return 0; - return DISPLAY_INFO(i915)->color.gamma_lut_tests; + return DISPLAY_INFO(display)->color.gamma_lut_tests; } static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - return DISPLAY_INFO(i915)->color.degamma_lut_tests; + return DISPLAY_INFO(display)->color.degamma_lut_tests; } static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return LEGACY_LUT_LENGTH; - return DISPLAY_INFO(i915)->color.gamma_lut_size; + return DISPLAY_INFO(display)->color.gamma_lut_size; } static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - return DISPLAY_INFO(i915)->color.degamma_lut_size; + return DISPLAY_INFO(display)->color.degamma_lut_size; } -static int check_lut_size(struct drm_i915_private *i915, +static int check_lut_size(struct intel_crtc *crtc, const char *lut_name, const struct drm_property_blob *lut, int expected) { + struct intel_display *display = to_intel_display(crtc); int len; if (!lut) @@ -2160,8 +2183,9 @@ static int check_lut_size(struct drm_i915_private *i915, len = drm_color_lut_size(lut); if (len != expected) { - drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n", - len, expected); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Invalid %s LUT size; got %d, expected %d\n", + crtc->base.base.id, crtc->base.name, lut_name, len, expected); return -EINVAL; } @@ -2171,23 +2195,25 @@ static int check_lut_size(struct drm_i915_private *i915, static int _check_luts(const struct 
intel_crtc_state *crtc_state, u32 degamma_tests, u32 gamma_tests) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; int gamma_length, degamma_length; /* C8 relies on its palette being stored in the legacy LUT */ if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) { - drm_dbg_kms(&i915->drm, - "C8 pixelformat requires the legacy LUT\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] C8 pixelformat requires the legacy LUT\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } degamma_length = intel_degamma_lut_size(crtc_state); gamma_length = intel_gamma_lut_size(crtc_state); - if (check_lut_size(i915, degamma_lut, degamma_length) || - check_lut_size(i915, gamma_lut, gamma_length)) + if (check_lut_size(crtc, "degamma", degamma_lut, degamma_length) || + check_lut_size(crtc, "gamma", gamma_lut, gamma_length)) return -EINVAL; if (drm_color_lut_check(degamma_lut, degamma_tests) || @@ -2219,9 +2245,10 @@ static int i9xx_lut_10_diff(u16 a, u16 b) drm_color_lut_extract(b, 10); } -static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, +static int i9xx_check_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { + struct intel_display *display = to_intel_display(crtc); const struct drm_color_lut *lut = blob->data; int lut_size = drm_color_lut_size(blob); const struct drm_color_lut *a = &lut[lut_size - 2]; @@ -2230,7 +2257,9 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, if (i9xx_lut_10_diff(b->red, a->red) > 0x7f || i9xx_lut_10_diff(b->green, a->green) > 0x7f || i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) { - drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Last gamma LUT entry exceeds max slope\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2239,28 +2268,28 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, void intel_color_assert_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); /* make sure {pre,post}_csc_lut were correctly assigned */ - if (DISPLAY_VER(i915) >= 11 || HAS_GMCH(i915)) { - drm_WARN_ON(&i915->drm, + if (DISPLAY_VER(display) >= 11 || HAS_GMCH(display)) { + drm_WARN_ON(display->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); - } else if (DISPLAY_VER(i915) == 10) { - drm_WARN_ON(&i915->drm, + } else if (DISPLAY_VER(display) == 10) { + drm_WARN_ON(display->drm, crtc_state->post_csc_lut == crtc_state->hw.gamma_lut && crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && - crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut); - drm_WARN_ON(&i915->drm, + crtc_state->pre_csc_lut != display->color.glk_linear_degamma_lut); + drm_WARN_ON(display->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != NULL && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); } else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && 
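/*
 * Restating the slope test above in self-contained form: in the
 * display ver < 4 10-bit gamma mode, the step between the last two
 * ramp entries must fit in 7 bits at 10-bit precision (<= 0x7f) for
 * every channel, or the state is rejected. The 16-to-10-bit rounding
 * below is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

struct color { uint16_t red, green, blue; };

static int lut_10(uint16_t v)
{
        return (int)(((uint32_t)v * 0x3ff + 0x7fff) / 0xffff);
}

static int check_last_slope(const struct color *lut, int size)
{
        const struct color *a = &lut[size - 2], *b = &lut[size - 1];

        if (lut_10(b->red) - lut_10(a->red) > 0x7f ||
            lut_10(b->green) - lut_10(a->green) > 0x7f ||
            lut_10(b->blue) - lut_10(a->blue) > 0x7f)
                return -1; /* last gamma LUT entry exceeds max slope */

        return 0;
}

int main(void)
{
        struct color lut[2] = { { 0, 0, 0 }, { 0xffff, 0xffff, 0xffff } };

        printf("%d\n", check_last_slope(lut, 2)); /* -1: 0 -> 0x3ff jump */
        return 0;
}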
crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != crtc_state->hw.degamma_lut && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); @@ -2278,7 +2307,7 @@ static void intel_assign_luts(struct intel_crtc_state *crtc_state) static int i9xx_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2293,9 +2322,9 @@ static int i9xx_color_check(struct intel_atomic_state *state, crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); - if (DISPLAY_VER(i915) < 4 && + if (DISPLAY_VER(display) < 4 && crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) { - ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut); + ret = i9xx_check_lut_10(crtc, crtc_state->hw.gamma_lut); if (ret) return ret; } @@ -2462,12 +2491,12 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state) static int ilk_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) @@ -2501,7 +2530,7 @@ static int ilk_assign_luts(struct intel_crtc_state *crtc_state) static int ilk_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2511,15 +2540,17 @@ static int ilk_color_check(struct intel_atomic_state *state, return ret; if (crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "Degamma and gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] Degamma and gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2572,21 +2603,21 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state) static int ivb_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct drm_property_blob *degamma_lut, *gamma_lut; if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) return ilk_assign_luts(crtc_state); - drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024); - drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024); + drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024); + drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024); - degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512, + degamma_lut = 
create_resized_lut(display, crtc_state->hw.degamma_lut, 512, false); if (IS_ERR(degamma_lut)) return PTR_ERR(degamma_lut); - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, 512, ilk_lut_limited_range(crtc_state)); if (IS_ERR(gamma_lut)) { drm_property_blob_put(degamma_lut); @@ -2605,7 +2636,7 @@ static int ivb_assign_luts(struct intel_crtc_state *crtc_state) static int ivb_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ -2615,22 +2646,25 @@ static int ivb_color_check(struct intel_atomic_state *state, return ret; if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) { - drm_dbg_kms(&i915->drm, - "C8 pixelformat and degamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] C8 pixelformat and degamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "YCbCr and degamma+gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2675,13 +2709,13 @@ static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_st static int glk_assign_luts(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (glk_use_pre_csc_lut_for_gamma(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, - DISPLAY_INFO(i915)->color.degamma_lut_size, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, + DISPLAY_INFO(display)->color.degamma_lut_size, false); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); @@ -2697,7 +2731,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state) if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; - gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, + gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) @@ -2720,7 +2754,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state) */ if (crtc_state->csc_enable && !crtc_state->pre_csc_lut) drm_property_replace_blob(&crtc_state->pre_csc_lut, - i915->display.color.glk_linear_degamma_lut); + display->color.glk_linear_degamma_lut); return 0; } @@ -2739,7 +2773,7 @@ static int glk_check_luts(const struct intel_crtc_state *crtc_state) static int glk_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; @@ 
-2750,15 +2784,17 @@ static int glk_color_check(struct intel_atomic_state *state, if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { - drm_dbg_kms(&i915->drm, - "YCbCr and CTM together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and CTM together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { - drm_dbg_kms(&i915->drm, - "YCbCr and degamma+gamma together are not possible\n"); + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -2795,8 +2831,7 @@ static int glk_color_check(struct intel_atomic_state *state, static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) @@ -2814,7 +2849,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) * ToDo: Extend to Logarithmic Gamma once the new UAPI * is accepted and implemented by a userspace consumer */ - else if (DISPLAY_VER(i915) >= 13) + else if (DISPLAY_VER(display) >= 13) gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG; @@ -3195,13 +3230,13 @@ static bool icl_lut_equal(const struct intel_crtc_state *crtc_state, static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; int i; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * LEGACY_LUT_LENGTH, NULL); if (IS_ERR(blob)) @@ -3210,8 +3245,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, i)); + u32 val = intel_de_read_fw(display, + PALETTE(display, pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -3221,15 +3256,15 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + u32 lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; u32 ldw, udw; int i; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, lut_size * sizeof(lut[0]), NULL); if (IS_ERR(blob)) return NULL; @@ -3237,10 +3272,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - ldw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0)); - udw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1)); + ldw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 0)); + udw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 1)); i9xx_lut_10_pack(&lut[i], ldw, udw); } @@ 
-3272,13 +3307,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3287,17 +3322,17 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - u32 ldw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 0)); - u32 udw = intel_de_read_fw(dev_priv, - PALETTE(dev_priv, pipe, 2 * i + 1)); + u32 ldw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 0)); + u32 udw = intel_de_read_fw(display, + PALETTE(display, pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } - lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0))); - lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1))); - lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 2))); return blob; } @@ -3324,13 +3359,13 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3339,8 +3374,8 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0)); - u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1)); + u32 ldw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0)); + u32 udw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1)); chv_cgm_degamma_pack(&lut[i], ldw, udw); } @@ -3350,13 +3385,13 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ 
-3365,8 +3400,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 ldw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0)); - u32 udw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1)); + u32 ldw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 1)); chv_cgm_gamma_pack(&lut[i], ldw, udw); } @@ -3376,10 +3411,10 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) static void chv_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - crtc_state->cgm_mode = intel_de_read(i915, CGM_PIPE_MODE(crtc->pipe)); + crtc_state->cgm_mode = intel_de_read(display, CGM_PIPE_MODE(crtc->pipe)); i9xx_get_config(crtc_state); } @@ -3399,13 +3434,13 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; int i; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * LEGACY_LUT_LENGTH, NULL); if (IS_ERR(blob)) @@ -3414,7 +3449,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read_fw(i915, LGC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(display, LGC_PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -3424,13 +3459,13 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3439,7 +3474,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read_fw(i915, PREC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(display, PREC_PALETTE(pipe, i)); ilk_lut_10_pack(&lut[i], val); } @@ -3487,13 +3522,13 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc, u32 prec_index) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); int i, lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3504,14 +3539,14 @@ static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc, for (i = 0; i < lut_size; i++) { u32 val; - intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + 
intel_de_write_fw(display, PREC_PAL_INDEX(pipe), prec_index + i); - val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); + val = intel_de_read_fw(display, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } - intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); return blob; @@ -3552,13 +3587,13 @@ static void ivb_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, u32 prec_index) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); int i, lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3566,19 +3601,19 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, lut = blob->data; - intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_INDEX(pipe), prec_index); - intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT | prec_index); for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe)); + u32 val = intel_de_read_fw(display, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } - intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); return blob; @@ -3617,13 +3652,13 @@ static void bdw_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&dev_priv->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3636,22 +3671,22 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) * ignore the index bits, so we need to reset it to index 0 * separately. 
*/ - intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_INDEX_VALUE(0)); - intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT | PRE_CSC_GAMC_INDEX_VALUE(0)); for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe)); + u32 val = intel_de_read_fw(display, PRE_CSC_GAMC_DATA(pipe)); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) mtl_degamma_lut_pack(&lut[i], val); else glk_degamma_lut_pack(&lut[i], val); } - intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_INDEX_VALUE(0)); return blob; @@ -3683,13 +3718,13 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob * icl_read_lut_multi_segment(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; + struct intel_display *display = to_intel_display(crtc); + int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; - blob = drm_property_create_blob(&i915->drm, + blob = drm_property_create_blob(display->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) @@ -3697,20 +3732,20 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc) lut = blob->data; - intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); - intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_AUTO_INCREMENT | PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); for (i = 0; i < 9; i++) { - u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe)); - u32 udw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 ldw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 udw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe)); ilk_lut_12p4_pack(&lut[i], ldw, udw); } - intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), + intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); /* @@ -3877,15 +3912,15 @@ static const struct intel_color_funcs ilk_color_funcs = { void intel_color_crtc_init(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); int degamma_lut_size, gamma_lut_size; bool has_ctm; drm_mode_crtc_set_gamma_size(&crtc->base, 256); - gamma_lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; - degamma_lut_size = DISPLAY_INFO(i915)->color.degamma_lut_size; - has_ctm = DISPLAY_VER(i915) >= 5; + gamma_lut_size = DISPLAY_INFO(display)->color.gamma_lut_size; + degamma_lut_size = DISPLAY_INFO(display)->color.degamma_lut_size; + has_ctm = DISPLAY_VER(display) >= 5; /* * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the @@ -3895,57 +3930,59 @@ void intel_color_crtc_init(struct intel_crtc *crtc) * Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm) * are confirmed not to suffer from this restriction. 
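/*
 * Every readout helper in this stretch uses the same indexed-register
 * idiom: program INDEX (optionally with an auto-increment bit), stream
 * reads out of DATA, then park INDEX back at 0. Per the comment in the
 * glk hunk, the index reset has to be its own write because the
 * auto-increment bit makes the hardware ignore the index bits. A toy
 * device modelling the idiom; the register layout is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define AUTO_INC 0x8000u

struct pal {
        uint32_t lut[8];
        uint32_t index; /* low bits: position, bit 15: auto-increment */
};

static void index_write(struct pal *p, uint32_t v)
{
        p->index = v;
}

static uint32_t data_read(struct pal *p)
{
        uint32_t v = p->lut[p->index & 0x7];

        if (p->index & AUTO_INC)
                p->index++; /* the hardware advances for us */

        return v;
}

int main(void)
{
        struct pal p = { .lut = { 1, 2, 3, 4, 5, 6, 7, 8 } };

        index_write(&p, 0);        /* reset the position first */
        index_write(&p, AUTO_INC); /* then turn on auto-increment */
        for (int i = 0; i < 8; i++)
                printf("%u ", data_read(&p));
        index_write(&p, 0);        /* park the index when done */
        printf("\n");
        return 0;
}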
*/ - if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A) + if (DISPLAY_VER(display) == 3 && crtc->pipe == PIPE_A) gamma_lut_size = 256; drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size, has_ctm, gamma_lut_size); } -int intel_color_init(struct drm_i915_private *i915) +int intel_color_init(struct intel_display *display) { struct drm_property_blob *blob; - if (DISPLAY_VER(i915) != 10) + if (DISPLAY_VER(display) != 10) return 0; - blob = create_linear_lut(i915, - DISPLAY_INFO(i915)->color.degamma_lut_size); + blob = create_linear_lut(display, + DISPLAY_INFO(display)->color.degamma_lut_size); if (IS_ERR(blob)) return PTR_ERR(blob); - i915->display.color.glk_linear_degamma_lut = blob; + display->color.glk_linear_degamma_lut = blob; return 0; } -void intel_color_init_hooks(struct drm_i915_private *i915) +void intel_color_init_hooks(struct intel_display *display) { - if (HAS_GMCH(i915)) { + struct drm_i915_private *i915 = to_i915(display->drm); + + if (HAS_GMCH(display)) { if (IS_CHERRYVIEW(i915)) - i915->display.funcs.color = &chv_color_funcs; + display->funcs.color = &chv_color_funcs; else if (IS_VALLEYVIEW(i915)) - i915->display.funcs.color = &vlv_color_funcs; - else if (DISPLAY_VER(i915) >= 4) - i915->display.funcs.color = &i965_color_funcs; + display->funcs.color = &vlv_color_funcs; + else if (DISPLAY_VER(display) >= 4) + display->funcs.color = &i965_color_funcs; else - i915->display.funcs.color = &i9xx_color_funcs; + display->funcs.color = &i9xx_color_funcs; } else { - if (DISPLAY_VER(i915) >= 12) - i915->display.funcs.color = &tgl_color_funcs; - else if (DISPLAY_VER(i915) == 11) - i915->display.funcs.color = &icl_color_funcs; - else if (DISPLAY_VER(i915) == 10) - i915->display.funcs.color = &glk_color_funcs; - else if (DISPLAY_VER(i915) == 9) - i915->display.funcs.color = &skl_color_funcs; - else if (DISPLAY_VER(i915) == 8) - i915->display.funcs.color = &bdw_color_funcs; + if (DISPLAY_VER(display) >= 12) + display->funcs.color = &tgl_color_funcs; + else if (DISPLAY_VER(display) == 11) + display->funcs.color = &icl_color_funcs; + else if (DISPLAY_VER(display) == 10) + display->funcs.color = &glk_color_funcs; + else if (DISPLAY_VER(display) == 9) + display->funcs.color = &skl_color_funcs; + else if (DISPLAY_VER(display) == 8) + display->funcs.color = &bdw_color_funcs; else if (IS_HASWELL(i915)) - i915->display.funcs.color = &hsw_color_funcs; - else if (DISPLAY_VER(i915) == 7) - i915->display.funcs.color = &ivb_color_funcs; + display->funcs.color = &hsw_color_funcs; + else if (DISPLAY_VER(display) == 7) + display->funcs.color = &ivb_color_funcs; else - i915->display.funcs.color = &ilk_color_funcs; + display->funcs.color = &ilk_color_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index 79f230a1709a..9d66457c1e89 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -11,11 +11,12 @@ struct intel_atomic_state; struct intel_crtc_state; struct intel_crtc; -struct drm_i915_private; +struct intel_display; +struct intel_dsb; struct drm_property_blob; -void intel_color_init_hooks(struct drm_i915_private *i915); -int intel_color_init(struct drm_i915_private *i915); +void intel_color_init_hooks(struct intel_display *display); +int intel_color_init(struct intel_display *display); void intel_color_crtc_init(struct intel_crtc *crtc); int intel_color_check(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -24,10 +25,13 @@ void intel_color_prepare_commit(struct 
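/*
 * intel_color_init_hooks() boils down to choosing one static const
 * function table per display generation, newest platforms matched
 * first so each table only has to cover its own era. The same shape
 * in miniature, with invented tables:
 */
#include <stdio.h>

struct color_funcs {
        const char *name;
};

static const struct color_funcs glk_color_funcs = { "glk" };
static const struct color_funcs ilk_color_funcs = { "ilk" };

static const struct color_funcs *pick_color_funcs(int display_ver)
{
        if (display_ver >= 10)
                return &glk_color_funcs;

        return &ilk_color_funcs;
}

int main(void)
{
        printf("%s\n", pick_color_funcs(11)->name);
        return 0;
}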
intel_atomic_state *state, void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state); bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state); void intel_color_wait_commit(const struct intel_crtc_state *crtc_state); -void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state); -void intel_color_commit_arm(const struct intel_crtc_state *crtc_state); +void intel_color_commit_noarm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); +void intel_color_commit_arm(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); void intel_color_post_update(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); +void intel_color_modeset(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 835c8b844494..74c1983fe07e 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -81,12 +81,13 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector) return intel_encoder_to_crt(intel_attached_encoder(connector)); } -bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, +bool intel_crt_port_enabled(struct intel_display *display, i915_reg_t adpa_reg, enum pipe *pipe) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val; - val = intel_de_read(dev_priv, adpa_reg); + val = intel_de_read(display, adpa_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) @@ -100,6 +101,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, static bool intel_crt_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); intel_wakeref_t wakeref; @@ -110,7 +112,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, if (!wakeref) return false; - ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); + ret = intel_crt_port_enabled(display, crt->adpa_reg, pipe); intel_display_power_put(dev_priv, encoder->power_domain, wakeref); @@ -119,11 +121,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; - tmp = intel_de_read(dev_priv, crt->adpa_reg); + tmp = intel_de_read(display, crt->adpa_reg); if (tmp & ADPA_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; @@ -168,13 +170,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int mode) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 adpa; - if (DISPLAY_VER(dev_priv) >= 5) + if (DISPLAY_VER(display) >= 5) adpa = ADPA_HOTPLUG_BITS; else 
adpa = 0; @@ -193,7 +196,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, adpa |= ADPA_PIPE_SEL(crtc->pipe); if (!HAS_PCH_SPLIT(dev_priv)) - intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0); + intel_de_write(display, BCLRPAT(display, crtc->pipe), 0); switch (mode) { case DRM_MODE_DPMS_ON: @@ -210,7 +213,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, break; } - intel_de_write(dev_priv, crt->adpa_reg, adpa); + intel_de_write(display, crt->adpa_reg, adpa); } static void intel_disable_crt(struct intel_atomic_state *state, @@ -241,9 +244,10 @@ static void hsw_disable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } @@ -253,6 +257,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(state); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -272,7 +277,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, hsw_fdi_disable(encoder); - drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -282,9 +287,10 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } @@ -294,11 +300,12 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; - drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); @@ -312,11 +319,12 @@ static void hsw_enable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; - drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder); intel_ddi_enable_transcoder_func(encoder, crtc_state); @@ -346,9 +354,10 @@ static enum drm_mode_status intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct intel_display *display = 
to_intel_display(connector->dev); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq; + int max_dotclk = display->cdclk.max_dotclk_freq; enum drm_mode_status status; int max_clock; @@ -367,7 +376,7 @@ intel_crt_mode_valid(struct drm_connector *connector, * DAC limit supposedly 355 MHz. */ max_clock = 270000; - else if (IS_DISPLAY_VER(dev_priv, 3, 4)) + else if (IS_DISPLAY_VER(display, 3, 4)) max_clock = 400000; else max_clock = 350000; @@ -428,6 +437,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -450,7 +460,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, if (HAS_PCH_LPT(dev_priv)) { /* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "LPT only supports 24bpp\n"); return -EINVAL; } @@ -470,6 +480,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, static bool ilk_crt_detect_hotplug(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); @@ -483,36 +494,36 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector) crt->force_hotplug_required = false; - save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); - drm_dbg_kms(&dev_priv->drm, + save_adpa = adpa = intel_de_read(display, crt->adpa_reg); + drm_dbg_kms(display->drm, "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; if (turn_off_dac) adpa &= ~ADPA_DAC_ENABLE; - intel_de_write(dev_priv, crt->adpa_reg, adpa); + intel_de_write(display, crt->adpa_reg, adpa); - if (intel_de_wait_for_clear(dev_priv, + if (intel_de_wait_for_clear(display, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { - intel_de_write(dev_priv, crt->adpa_reg, save_adpa); - intel_de_posting_read(dev_priv, crt->adpa_reg); + intel_de_write(display, crt->adpa_reg, save_adpa); + intel_de_posting_read(display, crt->adpa_reg); } } /* Check the status to see if both blue and green are on now */ - adpa = intel_de_read(dev_priv, crt->adpa_reg); + adpa = intel_de_read(display, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; - drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n", + drm_dbg_kms(display->drm, "ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); return ret; @@ -520,6 +531,7 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector) static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); @@ -542,29 +554,29 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) 
*/ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); - save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); - drm_dbg_kms(&dev_priv->drm, + save_adpa = adpa = intel_de_read(display, crt->adpa_reg); + drm_dbg_kms(display->drm, "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; - intel_de_write(dev_priv, crt->adpa_reg, adpa); + intel_de_write(display, crt->adpa_reg, adpa); - if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg, + if (intel_de_wait_for_clear(display, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "timed out waiting for FORCE_TRIGGER"); - intel_de_write(dev_priv, crt->adpa_reg, save_adpa); + intel_de_write(display, crt->adpa_reg, save_adpa); } /* Check the status to see if both blue and green are on now */ - adpa = intel_de_read(dev_priv, crt->adpa_reg); + adpa = intel_de_read(display, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); if (reenable_hpd) @@ -575,6 +587,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) static bool intel_crt_detect_hotplug(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 stat; @@ -603,18 +616,18 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN(dev_priv), + if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display), CRT_HOTPLUG_FORCE_DETECT, 1000)) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "timed out waiting for FORCE_DETECT to go off"); } - stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv)); + stat = intel_de_read(display, PORT_HOTPLUG_STAT(display)); if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) ret = true; /* clear the interrupt we just generated, if any */ - intel_de_write(dev_priv, PORT_HOTPLUG_STAT(dev_priv), + intel_de_write(display, PORT_HOTPLUG_STAT(display), CRT_HOTPLUG_INT_STATUS); i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); @@ -660,8 +673,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, static bool intel_crt_detect_ddc(struct drm_connector *connector) { - struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); - struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); + struct intel_display *display = to_intel_display(connector->dev); const struct drm_edid *drm_edid; bool ret = false; @@ -674,15 +686,15 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) * have to check the EDID input spec of the attached device. 
*/ if (drm_edid_is_digital(drm_edid)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "CRT detected via DDC:0x50 [EDID]\n"); ret = true; } } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "CRT not detected via DDC:0x50 [no valid EDID found]\n"); } @@ -694,8 +706,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) { - struct drm_device *dev = crt->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&crt->base); enum transcoder cpu_transcoder = (enum transcoder)pipe; u32 save_bclrpat; u32 save_vtotal; @@ -706,14 +717,14 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) u8 st00; enum drm_connector_status status; - drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); + drm_dbg_kms(display->drm, "starting load-detect on CRT\n"); - save_bclrpat = intel_de_read(dev_priv, - BCLRPAT(dev_priv, cpu_transcoder)); - save_vtotal = intel_de_read(dev_priv, - TRANS_VTOTAL(dev_priv, cpu_transcoder)); - vblank = intel_de_read(dev_priv, - TRANS_VBLANK(dev_priv, cpu_transcoder)); + save_bclrpat = intel_de_read(display, + BCLRPAT(display, cpu_transcoder)); + save_vtotal = intel_de_read(display, + TRANS_VTOTAL(display, cpu_transcoder)); + vblank = intel_de_read(display, + TRANS_VBLANK(display, cpu_transcoder)); vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1; vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1; @@ -722,25 +733,25 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1; /* Set the border color to purple. */ - intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder), 0x500050); + intel_de_write(display, BCLRPAT(display, cpu_transcoder), 0x500050); - if (DISPLAY_VER(dev_priv) != 2) { - u32 transconf = intel_de_read(dev_priv, - TRANSCONF(dev_priv, cpu_transcoder)); + if (DISPLAY_VER(display) != 2) { + u32 transconf = intel_de_read(display, + TRANSCONF(display, cpu_transcoder)); - intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), + intel_de_write(display, TRANSCONF(display, cpu_transcoder), transconf | TRANSCONF_FORCE_BORDER); - intel_de_posting_read(dev_priv, - TRANSCONF(dev_priv, cpu_transcoder)); + intel_de_posting_read(display, + TRANSCONF(display, cpu_transcoder)); /* Wait for next Vblank to substitue * border color for Color info */ - intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); - st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe)); + st00 = intel_de_read8(display, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? 
connector_status_connected : connector_status_disconnected; - intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), + intel_de_write(display, TRANSCONF(display, cpu_transcoder), transconf); } else { bool restore_vblank = false; @@ -751,13 +762,13 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - u32 vsync = intel_de_read(dev_priv, - TRANS_VSYNC(dev_priv, cpu_transcoder)); + u32 vsync = intel_de_read(display, + TRANS_VSYNC(display, cpu_transcoder)); u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1; vblank_start = vsync_start; - intel_de_write(dev_priv, - TRANS_VBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, + TRANS_VBLANK(display, cpu_transcoder), VBLANK_START(vblank_start - 1) | VBLANK_END(vblank_end - 1)); restore_vblank = true; @@ -771,9 +782,9 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) /* * Wait for the border to be displayed */ - while (intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) >= vactive) + while (intel_de_read(display, PIPEDSL(display, pipe)) >= vactive) ; - while ((dsl = intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe))) <= vsample) + while ((dsl = intel_de_read(display, PIPEDSL(display, pipe))) <= vsample) ; /* * Watch ST00 for an entire scanline @@ -783,15 +794,15 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) do { count++; /* Read the ST00 VGA status register */ - st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE); + st00 = intel_de_read8(display, _VGA_MSR_WRITE); if (st00 & (1 << 4)) detect++; - } while ((intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) == dsl)); + } while ((intel_de_read(display, PIPEDSL(display, pipe)) == dsl)); /* restore vblank if necessary */ if (restore_vblank) - intel_de_write(dev_priv, - TRANS_VBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, + TRANS_VBLANK(display, cpu_transcoder), vblank); /* * If more than 3/4 of the scanline detected a monitor, @@ -805,7 +816,7 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) } /* Restore previous settings */ - intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder), + intel_de_write(display, BCLRPAT(display, cpu_transcoder), save_bclrpat); return status; @@ -842,6 +853,7 @@ intel_crt_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; @@ -849,7 +861,7 @@ intel_crt_detect(struct drm_connector *connector, intel_wakeref_t wakeref; int status; - drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); @@ -859,7 +871,7 @@ intel_crt_detect(struct drm_connector *connector, if (!intel_display_driver_check_access(dev_priv)) return connector->status; - if (dev_priv->display.params.load_detect_test) { + if (display->params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); goto load_detect; @@ -872,18 +884,18 @@ intel_crt_detect(struct drm_connector *connector, wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); - if (I915_HAS_HOTPLUG(dev_priv)) { + if (I915_HAS_HOTPLUG(display)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do 
not pass it through, and so * only trust an assertion that the monitor is connected. */ if (intel_crt_detect_hotplug(connector)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "CRT detected via hotplug\n"); status = connector_status_connected; goto out; } else - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "CRT not detected via hotplug\n"); } @@ -896,7 +908,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev_priv)) { + if (I915_HAS_HOTPLUG(display)) { status = connector_status_disconnected; goto out; } @@ -916,10 +928,10 @@ load_detect: } else { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; - else if (DISPLAY_VER(dev_priv) < 4) + else if (DISPLAY_VER(display) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (dev_priv->display.params.load_detect_test) + else if (display->params.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; @@ -934,6 +946,7 @@ out: static int intel_crt_get_modes(struct drm_connector *connector) { + struct intel_display *display = to_intel_display(connector->dev); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); @@ -953,7 +966,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) goto out; /* Try to probe digital port for output in DVI-I -> VGA mode. */ - ddc = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB); + ddc = intel_gmbus_get_adapter(display, GMBUS_PIN_DPB); ret = intel_crt_ddc_get_modes(connector, ddc); out: @@ -964,19 +977,19 @@ out: void intel_crt_reset(struct drm_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_display *display = to_intel_display(encoder->dev); struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); - if (DISPLAY_VER(dev_priv) >= 5) { + if (DISPLAY_VER(display) >= 5) { u32 adpa; - adpa = intel_de_read(dev_priv, crt->adpa_reg); + adpa = intel_de_read(display, crt->adpa_reg); adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= ADPA_HOTPLUG_BITS; - intel_de_write(dev_priv, crt->adpa_reg, adpa); - intel_de_posting_read(dev_priv, crt->adpa_reg); + intel_de_write(display, crt->adpa_reg, adpa); + intel_de_posting_read(display, crt->adpa_reg); - drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa); + drm_dbg_kms(display->drm, "crt adpa set to 0x%x\n", adpa); crt->force_hotplug_required = true; } @@ -1006,8 +1019,9 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; -void intel_crt_init(struct drm_i915_private *dev_priv) +void intel_crt_init(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; @@ -1022,7 +1036,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) else adpa_reg = ADPA; - adpa = intel_de_read(dev_priv, adpa_reg); + adpa = intel_de_read(display, adpa_reg); if ((adpa & ADPA_DAC_ENABLE) == 0) { /* * On some machines (some IVB at least) CRT can be @@ -1032,11 +1046,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv) * take. So the only way to tell is attempt to enable * it and see what happens. 
*/ - intel_de_write(dev_priv, adpa_reg, + intel_de_write(display, adpa_reg, adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); - if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0) + if ((intel_de_read(display, adpa_reg) & ADPA_DAC_ENABLE) == 0) return; - intel_de_write(dev_priv, adpa_reg, adpa); + intel_de_write(display, adpa_reg, adpa); } crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); @@ -1049,16 +1063,16 @@ void intel_crt_init(struct drm_i915_private *dev_priv) return; } - ddc_pin = dev_priv->display.vbt.crt_ddc_pin; + ddc_pin = display->vbt.crt_ddc_pin; connector = &intel_connector->base; crt->connector = intel_connector; - drm_connector_init_with_ddc(&dev_priv->drm, connector, + drm_connector_init_with_ddc(display->drm, connector, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA, - intel_gmbus_get_adapter(dev_priv, ddc_pin)); + intel_gmbus_get_adapter(display, ddc_pin)); - drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs, + drm_encoder_init(display->drm, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); @@ -1070,14 +1084,14 @@ void intel_crt_init(struct drm_i915_private *dev_priv) else crt->base.pipe_mask = ~0; - if (DISPLAY_VER(dev_priv) != 2) + if (DISPLAY_VER(display) != 2) connector->interlace_allowed = true; crt->adpa_reg = adpa_reg; crt->base.power_domain = POWER_DOMAIN_PORT_CRT; - if (I915_HAS_HOTPLUG(dev_priv) && + if (I915_HAS_HOTPLUG(display) && !dmi_check_system(intel_spurious_crt_detect)) { crt->base.hpd_pin = HPD_CRT; crt->base.hotplug = intel_encoder_hotplug; @@ -1087,7 +1101,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) } intel_connector->base.polled = intel_connector->polled; - if (HAS_DDI(dev_priv)) { + if (HAS_DDI(display)) { assert_port_valid(dev_priv, PORT_E); crt->base.port = PORT_E; @@ -1131,8 +1145,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv) u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE; - dev_priv->display.fdi.rx_config = intel_de_read(dev_priv, - FDI_RX_CTL(PIPE_A)) & fdi_config; + display->fdi.rx_config = intel_de_read(display, + FDI_RX_CTL(PIPE_A)) & fdi_config; } intel_crt_reset(&crt->base.base); diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h index fe7690c2b948..e0abfe96a3d2 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.h +++ b/drivers/gpu/drm/i915/display/intel_crt.h @@ -10,20 +10,20 @@ enum pipe; struct drm_encoder; -struct drm_i915_private; +struct intel_display; #ifdef I915 -bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, +bool intel_crt_port_enabled(struct intel_display *display, i915_reg_t adpa_reg, enum pipe *pipe); -void intel_crt_init(struct drm_i915_private *dev_priv); +void intel_crt_init(struct intel_display *display); void intel_crt_reset(struct drm_encoder *encoder); #else -static inline bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, +static inline bool intel_crt_port_enabled(struct intel_display *display, i915_reg_t adpa_reg, enum pipe *pipe) { return false; } -static inline void intel_crt_init(struct drm_i915_private *dev_priv) +static inline void intel_crt_init(struct intel_display *display) { } static inline void intel_crt_reset(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 1b578cad2813..a2c528d707f4 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ 
b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> +#include <drm/drm_vblank.h> #include <drm/drm_vblank_work.h> #include "i915_vgpu.h" @@ -35,11 +36,11 @@ static void assert_vblank_disabled(struct drm_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); - if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0, - "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n", - crtc->base.id, crtc->name)) + if (INTEL_DISPLAY_STATE_WARN(display, drm_crtc_vblank_get(crtc) == 0, + "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n", + crtc->base.id, crtc->name)) drm_crtc_vblank_put(crtc); } @@ -48,12 +49,12 @@ struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915) return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0)); } -struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, +struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display, enum pipe pipe) { struct intel_crtc *crtc; - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { if (crtc->pipe == pipe) return crtc; } @@ -69,7 +70,8 @@ void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc) void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct intel_display *display = &i915->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); if (crtc->active) intel_crtc_wait_for_next_vblank(crtc); @@ -122,6 +124,8 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state); + assert_vblank_disabled(&crtc->base); drm_crtc_set_max_vblank_count(&crtc->base, intel_crtc_max_vblank_count(crtc_state)); @@ -138,6 +142,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_display *display = to_intel_display(crtc); /* * Should really happen exactly when we disable the pipe @@ -148,6 +153,10 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) drm_crtc_vblank_off(&crtc->base); assert_vblank_disabled(&crtc->base); + + crtc->block_dc_for_vblank = false; + + flush_work(&display->irq.vblank_dc_work); } struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) @@ -387,13 +396,31 @@ fail: return ret; } +int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; + struct drm_crtc *drm_crtc; + struct intel_crtc *crtc; + + drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); + if (!drm_crtc) + return -ENOENT; + + crtc = to_intel_crtc(drm_crtc); + pipe_from_crtc_id->pipe = crtc->pipe; + + return 0; +} + static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) { return crtc_state->hw.active && - !intel_crtc_needs_modeset(crtc_state) && !crtc_state->preload_luts && + !intel_crtc_needs_modeset(crtc_state) && intel_crtc_needs_color_update(crtc_state) && - !intel_color_uses_dsb(crtc_state); + !intel_color_uses_dsb(crtc_state) && + !crtc_state->use_dsb; } static void 
intel_crtc_vblank_work(struct kthread_work *base) @@ -457,6 +484,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 1000 * adjusted_mode->crtc_htotal); } +int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode, + int scanlines) +{ + /* paranoia */ + if (!adjusted_mode->crtc_clock) + return 1; + + return DIV_ROUND_UP_ULL(mul_u32_u32(scanlines, adjusted_mode->crtc_htotal * 1000), + adjusted_mode->crtc_clock); +} + /** * intel_pipe_update_start() - start update of a set of display registers * @state: the atomic state @@ -484,12 +522,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state, intel_psr_lock(new_crtc_state); if (new_crtc_state->do_async_flip) { - spin_lock_irq(&crtc->base.dev->event_lock); - /* arm the event for the flip done irq handler */ - crtc->flip_done_event = new_crtc_state->uapi.event; - spin_unlock_irq(&crtc->base.dev->event_lock); - - new_crtc_state->uapi.event = NULL; + intel_crtc_prepare_vblank_event(new_crtc_state, + &crtc->flip_done_event); return; } @@ -589,6 +623,19 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state) crtc_state->uapi.event = NULL; } +void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state, + struct drm_pending_vblank_event **event) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + unsigned long irqflags; + + spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags); + *event = crtc_state->uapi.event; + spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags); + + crtc_state->uapi.event = NULL; +} + /** * intel_pipe_update_end() - end update of a set of display registers * @state: the atomic state diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index b615b7ab5ccd..de54ae1deedf 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -10,11 +10,15 @@ enum i9xx_plane_id; enum pipe; +struct drm_device; struct drm_display_mode; +struct drm_file; struct drm_i915_private; +struct drm_pending_vblank_event; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; /* * FIXME: We should instead only take spinlocks once for the entire update @@ -28,9 +32,15 @@ struct intel_crtc_state; int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); +int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode, + int scanlines); void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state); +void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state, + struct drm_pending_vblank_event **event); u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state); int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe); +int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, struct intel_crtc *crtc); @@ -43,7 +53,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_wait_for_vblank_workers(struct intel_atomic_state *state); struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915); -struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, +struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display, enum pipe pipe); void intel_wait_for_vblank_if_active(struct 
drm_i915_private *i915, enum pipe pipe); diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 9ad53e1cbbd0..9ba77970dab7 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -9,6 +9,7 @@ #include <drm/drm_blend.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_vblank.h> #include "i915_reg.h" #include "intel_atomic.h" @@ -26,8 +27,6 @@ #include "intel_vblank.h" #include "skl_watermark.h" -#include "gem/i915_gem_object.h" - /* Cursor formats */ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, @@ -275,7 +274,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, } /* TODO: split into noarm+arm pair */ -static void i845_cursor_update_arm(struct intel_plane *plane, +static void i845_cursor_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -315,10 +315,11 @@ static void i845_cursor_update_arm(struct intel_plane *plane, } } -static void i845_cursor_disable_arm(struct intel_plane *plane, +static void i845_cursor_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - i845_cursor_update_arm(plane, crtc_state, NULL); + i845_cursor_update_arm(dsb, plane, crtc_state, NULL); } static bool i845_cursor_get_hw_state(struct intel_plane *plane, @@ -527,22 +528,25 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane, +static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), 0); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), 0); } -static void wa_16021440873(struct intel_plane *plane, +static void wa_16021440873(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); u32 ctl = plane_state->ctl; int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1; @@ -551,16 +555,18 @@ static void wa_16021440873(struct intel_plane *plane, ctl &= ~MCURSOR_MODE_MASK; ctl |= MCURSOR_MODE_64_2B; - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), ctl); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl); - intel_de_write(dev_priv, CURPOS_ERLY_TPT(dev_priv, pipe), - CURSOR_POS_Y(et_y_position)); + intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), + CURSOR_POS_Y(et_y_position)); } -static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, +static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -571,19 +577,17 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, 
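The struct intel_dsb *dsb parameter threaded through the cursor hooks lets the same register writes run either immediately over MMIO or deferred as part of a Display State Buffer batch. intel_de_write_dsb() is effectively a dispatch on whether a batch is being built; a simplified sketch of the expected behavior, inferred from the call sites here (not the verbatim helper):

static void example_de_write_dsb(struct intel_display *display,
				 struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val)
{
	if (dsb)
		intel_dsb_reg_write(dsb, reg, val);	/* queue into the batch */
	else
		intel_de_write_fw(display, reg, val);	/* immediate MMIO write */
}

Callers that must stay on the immediate path simply pass NULL, as intel_legacy_cursor_update() does further down.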
if (crtc_state->enable_psr2_su_region_et) { u32 val = intel_cursor_position(crtc_state, plane_state, true); - intel_de_write_fw(dev_priv, - CURPOS_ERLY_TPT(dev_priv, pipe), - val); + + intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val); } - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), - plane_state->ctl); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl); } else { /* Wa_16021440873 */ if (crtc_state->enable_psr2_su_region_et) - wa_16021440873(plane, crtc_state, plane_state); + wa_16021440873(dsb, plane, crtc_state, plane_state); else - i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); + i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state); } } @@ -610,9 +614,11 @@ static u32 skl_cursor_wm_reg_val(const struct skl_wm_level *level) return val; } -static void skl_write_cursor_wm(struct intel_plane *plane, +static void skl_write_cursor_wm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -622,30 +628,32 @@ static void skl_write_cursor_wm(struct intel_plane *plane, int level; for (level = 0; level < i915->display.wm.num_levels; level++) - intel_de_write_fw(i915, CUR_WM(pipe, level), - skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); + intel_de_write_dsb(display, dsb, CUR_WM(pipe, level), + skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); - intel_de_write_fw(i915, CUR_WM_TRANS(pipe), - skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); + intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe), + skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - intel_de_write_fw(i915, CUR_WM_SAGV(pipe), - skl_cursor_wm_reg_val(&wm->sagv.wm0)); - intel_de_write_fw(i915, CUR_WM_SAGV_TRANS(pipe), - skl_cursor_wm_reg_val(&wm->sagv.trans_wm)); + intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe), + skl_cursor_wm_reg_val(&wm->sagv.wm0)); + intel_de_write_dsb(display, dsb, CUR_WM_SAGV_TRANS(pipe), + skl_cursor_wm_reg_val(&wm->sagv.trans_wm)); } - intel_de_write_fw(i915, CUR_BUF_CFG(pipe), - skl_cursor_ddb_reg_val(ddb)); + intel_de_write_dsb(display, dsb, CUR_BUF_CFG(pipe), + skl_cursor_ddb_reg_val(ddb)); } /* TODO: split into noarm+arm pair */ -static void i9xx_cursor_update_arm(struct intel_plane *plane, +static void i9xx_cursor_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; @@ -685,38 +693,36 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, */ if (DISPLAY_VER(dev_priv) >= 9) - skl_write_cursor_wm(plane, crtc_state); + skl_write_cursor_wm(dsb, plane, crtc_state); if (plane_state) - i9xx_cursor_update_sel_fetch_arm(plane, crtc_state, - plane_state); + i9xx_cursor_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); else - i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); + i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { if 
(HAS_CUR_FBC(dev_priv)) - intel_de_write_fw(dev_priv, - CUR_FBC_CTL(dev_priv, pipe), - fbc_ctl); - intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl); - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl); + intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); plane->cursor.base = base; plane->cursor.size = fbc_ctl; plane->cursor.cntl = cntl; } else { - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); } } -static void i9xx_cursor_disable_arm(struct intel_plane *plane, +static void i9xx_cursor_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - i9xx_cursor_update_arm(plane, crtc_state, NULL); + i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -905,10 +911,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane, } if (new_plane_state->uapi.visible) { - intel_plane_update_noarm(plane, crtc_state, new_plane_state); - intel_plane_update_arm(plane, crtc_state, new_plane_state); + intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state); + intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state); } else { - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); } local_irq_enable(); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 4a6c3040ca15..71dc659228ab 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -34,6 +34,9 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder) struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_encoder_to_phy(encoder); + if (IS_PANTHERLAKE(i915) && phy == PHY_A) + return true; + if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) return true; @@ -65,22 +68,23 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder) } static void -assert_dc_off(struct drm_i915_private *i915) +assert_dc_off(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); bool enabled; enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF); - drm_WARN_ON(&i915->drm, !enabled); + drm_WARN_ON(display->drm, !enabled); } static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); int lane; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane) - intel_de_rmw(i915, - XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane), + intel_de_rmw(display, + XELPDP_PORT_MSGBUS_TIMER(display, encoder->port, lane), XELPDP_PORT_MSGBUS_TIMER_VAL_MASK, XELPDP_PORT_MSGBUS_TIMER_VAL); } @@ -119,25 +123,28 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w static void intel_clear_response_ready_flag(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = 
to_intel_display(encoder); - intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane), + intel_de_rmw(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, encoder->port, lane), 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); + drm_err_once(display->drm, + "Failed to bring PHY %c to idle.\n", + phy_name(phy)); return; } @@ -147,22 +154,23 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, int command, int lane, u32 *val) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - if (intel_de_wait_custom(i915, - XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), + if (intel_de_wait_custom(display, + XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_MSGBUS_TIMEOUT_FAST_US, XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { - drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", + drm_dbg_kms(display->drm, + "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); - if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) & + if (!(intel_de_read(display, XELPDP_PORT_MSGBUS_TIMER(display, port, lane)) & XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT)) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Hardware did not detect a timeout\n", phy_name(phy)); @@ -171,14 +179,18 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, } if (*val & XELPDP_PORT_P2M_ERROR_SET) { - drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Error occurred during %s command. Status: 0x%x\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; } if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { - drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), + drm_dbg_kms(display->drm, + "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", + phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? 
"read" : "write", *val); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -190,22 +202,22 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, static int __intel_cx0_read_once(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | XELPDP_PORT_M2P_COMMAND_READ | XELPDP_PORT_M2P_ADDRESS(addr)); @@ -221,7 +233,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } @@ -229,11 +242,11 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder, static u8 __intel_cx0_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to read successfully */ for (i = 0; i < 3; i++) { @@ -243,7 +256,8 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder, return status; } - drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", + drm_err_once(display->drm, + "PHY %c Read %04x failed after %d retries.\n", phy_name(phy), addr, i); return 0; @@ -260,32 +274,32 @@ static u8 intel_cx0_read(struct intel_encoder *encoder, static int __intel_cx0_write_once(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } - intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | (committed ? 
XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED : XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) | XELPDP_PORT_M2P_DATA(data) | XELPDP_PORT_M2P_ADDRESS(addr)); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; @@ -295,9 +309,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); if (ack < 0) return ack; - } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) & + } else if ((intel_de_read(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Error occurred during write command.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); return -EINVAL; @@ -310,7 +324,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(encoder, lane); + if (DISPLAY_VER(display) < 30) + intel_cx0_bus_reset(encoder, lane); return 0; } @@ -318,11 +333,11 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder, static void __intel_cx0_write(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); int i, status; - assert_dc_off(i915); + assert_dc_off(display); /* 3 tries is assumed to be enough to write successfully */ for (i = 0; i < 3; i++) { @@ -332,7 +347,7 @@ static void __intel_cx0_write(struct intel_encoder *encoder, return; } - drm_err_once(&i915->drm, + drm_err_once(display->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } @@ -348,9 +363,9 @@ static void intel_cx0_write(struct intel_encoder *encoder, static void intel_c20_sram_write(struct intel_encoder *encoder, int lane, u16 addr, u16 data) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); @@ -362,10 +377,10 @@ static void intel_c20_sram_write(struct intel_encoder *encoder, static u16 intel_c20_sram_read(struct intel_encoder *encoder, int lane, u16 addr) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u16 val; - assert_dc_off(i915); + assert_dc_off(display); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); @@ -429,7 +444,7 @@ static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state) void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; u8 owned_lane_mask; intel_wakeref_t wakeref; 
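Despite the pointer churn, the message-bus write handshake in __intel_cx0_write_once() is unchanged. Condensed to its steps (a summary sketch of the committed-write case, not the full function):

	/* 1. Drain any previous M2P transaction before posting a new one. */
	if (intel_de_wait_for_clear(display,
				    XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
				    XELPDP_MSGBUS_TIMEOUT_SLOW))
		return -ETIMEDOUT;	/* after resetting the bus */

	/* 2. Post address and data; committed writes request an explicit ack. */
	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
		       XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
		       XELPDP_PORT_M2P_DATA(data) |
		       XELPDP_PORT_M2P_ADDRESS(addr));

	/* 3. Wait for TRANSACTION_PENDING to clear, then check the P2M ack
	 * (committed) or the error bit (uncommitted). New in this series:
	 * on Xe3 (display version 30+) the trailing defensive bus reset
	 * after a successful transaction is skipped.
	 */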
@@ -444,7 +459,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, wakeref = intel_cx0_phy_transaction_begin(encoder); trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&i915->drm, !trans)) { + if (drm_WARN_ON_ONCE(display->drm, !trans)) { intel_cx0_phy_transaction_end(encoder, wakeref); return; } @@ -923,10 +938,10 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { }, .mplla = { 0x3104, /* mplla cfg0 */ 0xd105, /* mplla cfg1 */ - 0xc025, /* mplla cfg2 */ - 0xc025, /* mplla cfg3 */ - 0xa6ab, /* mplla cfg4 */ - 0x8c00, /* mplla cfg5 */ + 0x9217, /* mplla cfg2 */ + 0x9217, /* mplla cfg3 */ + 0x8c00, /* mplla cfg4 */ + 0x759a, /* mplla cfg5 */ 0x4000, /* mplla cfg6 */ 0x0003, /* mplla cfg7 */ 0x3555, /* mplla cfg8 */ @@ -1122,6 +1137,22 @@ static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = { NULL, }; +static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = { + &mtl_c20_dp_rbr, + &xe2hpd_c20_edp_r216, + &xe2hpd_c20_edp_r243, + &mtl_c20_dp_hbr1, + &xe2hpd_c20_edp_r324, + &xe2hpd_c20_edp_r432, + &mtl_c20_dp_hbr2, + &xe2hpd_c20_edp_r675, + &mtl_c20_dp_hbr3, + &mtl_c20_dp_uhbr10, + &xe2hpd_c20_dp_uhbr13_5, + &mtl_c20_dp_uhbr20, + NULL, +}; + /* * HDMI link rates with 38.4 MHz reference clock. */ @@ -2003,12 +2034,12 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state, static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll; int i; if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_panel_use_ssc(i915)) { + if (intel_panel_use_ssc(display)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); pll_state->ssc_enabled = @@ -2019,7 +2050,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, if (pll_state->ssc_enabled) return; - drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); + drm_WARN_ON(display->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); for (i = 4; i < 9; i++) pll_state->c10.pll[i] = 0; } @@ -2073,7 +2104,7 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c10_pll_program(struct drm_i915_private *i915, +static void intel_c10_pll_program(struct intel_display *display, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { @@ -2106,7 +2137,7 @@ static void intel_c10_pll_program(struct drm_i915_private *i915, MB_WRITE_COMMITTED); } -static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c10pll_dump_hw_state(struct intel_display *display, const struct intel_c10pll_state *hw_state) { bool fracen; @@ -2115,35 +2146,39 @@ static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, unsigned int multiplier, tx_clk_div; fracen = hw_state->pll[0] & C10_PLL0_FRACEN; - drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ", + drm_dbg_kms(display->drm, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen)); if (fracen) { frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11]; frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; - drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n", + drm_dbg_kms(display->drm, "quot: %u, rem: %u, den: %u,\n", frac_quot, frac_rem, frac_den); } multiplier = 
(REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | hw_state->pll[2]) / 2 + 16; tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); - drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:"); - drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); + drm_dbg_kms(display->drm, "c10pll_rawhw_state:"); + drm_dbg_kms(display->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, + hw_state->cmn); BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) - drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", + drm_dbg_kms(display->drm, + "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]); } -static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state) +static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); + struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20; u64 datarate; u64 mpll_tx_clk_div; u64 vco_freq_shift; @@ -2152,13 +2187,14 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ u64 mpll_multiplier; u64 mpll_fracn_quot; u64 mpll_fracn_rem; + u16 tx_misc; u8 mpllb_ana_freq_vco; u8 mpll_div_multiplier; - if (pixel_clock < 25175 || pixel_clock > 600000) + if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000) return -EINVAL; - datarate = ((u64)pixel_clock * 1000) * 10; + datarate = ((u64)crtc_state->port_clock * 1000) * 10; mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate)); vco_freq = (datarate << vco_freq_shift) >> 8; @@ -2171,6 +2207,11 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)), datarate), 255); + if (DISPLAY_VER(display) >= 20) + tx_misc = 0x5; + else + tx_misc = 0x0; + if (vco_freq <= DATARATE_3000000000) mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3; else if (vco_freq <= DATARATE_3500000000) @@ -2180,9 +2221,9 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ else mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; - pll_state->clock = pixel_clock; + pll_state->clock = crtc_state->port_clock; pll_state->tx[0] = 0xbe88; - pll_state->tx[1] = 0x9800; + pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc); pll_state->tx[2] = 0x0000; pll_state->cmn[0] = 0x0500; pll_state->cmn[1] = 0x0005; @@ -2239,13 +2280,19 @@ static const struct intel_c20pll_state * const * intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_has_dp_encoder(crtc_state)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return xe2hpd_c20_edp_tables; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { + if (DISPLAY_RUNTIME_INFO(display)->edp_typec_support) + return xe3lpd_c20_dp_edp_tables; + if (DISPLAY_VERx100(display) == 1401) + return xe2hpd_c20_edp_tables; + } - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) + if (DISPLAY_VER(display) 
>= 30) + return xe3lpd_c20_dp_edp_tables; + else if (DISPLAY_VERx100(display) == 1401) return xe2hpd_c20_dp_tables; else return mtl_c20_dp_tables; @@ -2266,8 +2313,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, /* try computed C20 HDMI tables before using consolidated tables */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, - &crtc_state->dpll_hw_state.cx0pll.c20) == 0) + if (intel_c20_compute_hdmi_tmds_pll(crtc_state) == 0) return 0; } @@ -2347,10 +2393,10 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state) { + struct intel_display *display = to_intel_display(encoder); bool cntx; intel_wakeref_t wakeref; int i; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -2362,11 +2408,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i)); + PHY_C20_B_TX_CNTX_CFG(display, i)); else pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i)); + PHY_C20_A_TX_CNTX_CFG(display, i)); } /* Read common configuration */ @@ -2374,11 +2420,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i)); + PHY_C20_B_CMN_CNTX_CFG(display, i)); else pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i)); + PHY_C20_A_CMN_CNTX_CFG(display, i)); } if (intel_c20phy_use_mpllb(pll_state)) { @@ -2387,11 +2433,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLB_CNTX_CFG(display, i)); else pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLB_CNTX_CFG(display, i)); } } else { /* MPLLA configuration */ @@ -2399,11 +2445,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, if (cntx) pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_B_MPLLA_CNTX_CFG(display, i)); else pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i)); + PHY_C20_A_MPLLA_CNTX_CFG(display, i)); } } @@ -2412,33 +2458,37 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, intel_cx0_phy_transaction_end(encoder, wakeref); } -static void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, +static void intel_c20pll_dump_hw_state(struct intel_display *display, const struct intel_c20pll_state *hw_state) { int i; - drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n"); - drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", + drm_dbg_kms(display->drm, "c20pll_hw_state:\n"); + drm_dbg_kms(display->drm, + "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); - drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", + drm_dbg_kms(display->drm, + "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", hw_state->cmn[0], 
hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); if (intel_c20phy_use_mpllb(hw_state)) { for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) - drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); + drm_dbg_kms(display->drm, "mpllb[%d] = 0x%.4x\n", i, + hw_state->mpllb[i]); } else { for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) - drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); + drm_dbg_kms(display->drm, "mplla[%d] = 0x%.4x\n", i, + hw_state->mplla[i]); } } -void intel_cx0pll_dump_hw_state(struct drm_i915_private *i915, +void intel_cx0pll_dump_hw_state(struct intel_display *display, const struct intel_cx0pll_state *hw_state) { if (hw_state->use_c10) - intel_c10pll_dump_hw_state(i915, &hw_state->c10); + intel_c10pll_dump_hw_state(display, &hw_state->c10); else - intel_c20pll_dump_hw_state(i915, &hw_state->c20); + intel_c20pll_dump_hw_state(display, &hw_state->c20); } static u8 intel_c20_get_dp_rate(u32 clock) @@ -2538,7 +2588,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp) return 0; } -static void intel_c20_pll_program(struct drm_i915_private *i915, +static void intel_c20_pll_program(struct intel_display *display, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { @@ -2571,11 +2621,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_TX_CNTX_CFG(i915, i), + PHY_C20_A_TX_CNTX_CFG(display, i), pll_state->tx[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_TX_CNTX_CFG(i915, i), + PHY_C20_B_TX_CNTX_CFG(display, i), pll_state->tx[i]); } @@ -2583,11 +2633,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_CMN_CNTX_CFG(i915, i), + PHY_C20_A_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_CMN_CNTX_CFG(i915, i), + PHY_C20_B_CMN_CNTX_CFG(display, i), pll_state->cmn[i]); } @@ -2596,22 +2646,22 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLB_CNTX_CFG(i915, i), + PHY_C20_A_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLB_CNTX_CFG(i915, i), + PHY_C20_B_MPLLB_CNTX_CFG(display, i), pll_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_A_MPLLA_CNTX_CFG(i915, i), + PHY_C20_A_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); else intel_c20_sram_write(encoder, INTEL_CX0_LANE0, - PHY_C20_B_MPLLA_CNTX_CFG(i915, i), + PHY_C20_B_MPLLA_CNTX_CFG(display, i), pll_state->mplla[i]); } } @@ -2678,10 +2728,10 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 val = 0; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), XELPDP_PORT_REVERSAL, lane_reversal ? 
XELPDP_PORT_REVERSAL : 0); @@ -2703,7 +2753,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, else val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB, val); @@ -2734,48 +2784,49 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, u8 lane_mask, u8 state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); - i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port); + i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(display, port); int lane; - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), intel_cx0_get_powerdown_state(lane_mask, state)); /* Wait for pending transactions.*/ for_each_cx0_lane_in_mask(lane_mask, lane) - if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), + if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", phy_name(phy)); intel_cx0_bus_reset(encoder, lane); } - intel_de_rmw(i915, buf_ctl2_reg, + intel_de_rmw(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ - if (intel_de_wait_custom(i915, buf_ctl2_reg, + if (intel_de_wait_custom(display, buf_ctl2_reg, intel_cx0_get_powerdown_update(lane_mask), 0, XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); } static void intel_cx0_setup_powerdown(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), XELPDP_POWER_STATE_READY_MASK, XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port), XELPDP_POWER_STATE_ACTIVE_MASK | XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | @@ -2807,7 +2858,7 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, bool lane_reversal) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum phy phy = intel_encoder_to_phy(encoder); u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); @@ -2820,48 +2871,51 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, 
XELPDP_LANE_PHY_CURRENT_STATUS(1)) : XELPDP_LANE_PHY_CURRENT_STATUS(0); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port), XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, lane_pipe_reset); - if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, lane_phy_current_status, XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_request(owned_lane_mask), intel_cx0_get_pclk_refclk_request(lane_mask)); - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port), intel_cx0_get_pclk_refclk_ack(owned_lane_mask), intel_cx0_get_pclk_refclk_ack(lane_mask), XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n", + drm_warn(display->drm, + "PHY %c failed to request refclk after %dus.\n", phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_RESET); intel_cx0_setup_powerdown(encoder); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0); + intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0); - if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port), + if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_current_status, XELPDP_PORT_RESET_END_TIMEOUT)) - drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n", + drm_warn(display->drm, + "PHY %c failed to bring out of Lane reset after %dms.\n", phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); } -static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, - struct intel_encoder *encoder, int lane_count, +static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count, bool lane_reversal) { int i; @@ -2930,7 +2984,7 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) static void intel_cx0pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; @@ -2962,15 +3016,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, /* 5. Program PHY internal PLL internal registers. 
*/ if (intel_encoder_is_c10phy(encoder)) - intel_c10_pll_program(i915, crtc_state, encoder); + intel_c10_pll_program(display, crtc_state, encoder); else - intel_c20_pll_program(i915, crtc_state, encoder); + intel_c20_pll_program(display, crtc_state, encoder); /* * 6. Program the enabled and disabled owned PHY lane * transmitters over message bus */ - intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal); + intel_cx0_program_phy_lane(encoder, crtc_state->lane_count, lane_reversal); /* * 7. Follow the Display Voltage Frequency Switching - Sequence @@ -2981,23 +3035,23 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, * 8. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); /* * 9. Set PORT_CLOCK_CTL register PCLK PLL Request * LN<Lane for maxPCLK> to "1" to enable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_ack(maxpclk_lane), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", + drm_warn(display->drm, "Port %c PLL not locked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); /* @@ -3011,15 +3065,16 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - u32 clock; - u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + struct intel_display *display = to_intel_display(encoder); + u32 clock, val; + + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); - drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); - drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK)); + drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); + drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_ACK)); switch (clock) { case XELPDP_DDI_CLOCK_SELECT_TBT_162: @@ -3036,7 +3091,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) } } -static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) +static int intel_mtl_tbt_clock_select(int clock) { switch (clock) { case 162000: @@ -3056,7 +3111,7 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); u32 val = 0; @@ -3064,13 +3119,13 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 1. 
Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ - val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock)); + val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock)); val |= XELPDP_FORWARD_CLOCK_UNGATE; - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); /* 2. Read back PORT_CLOCK_CTL REGISTER */ - val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); /* * 3. Follow the Display Voltage Frequency Switching - Sequence @@ -3081,14 +3136,15 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. */ val |= XELPDP_TBT_CLOCK_REQUEST; - intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val); + intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, XELPDP_TBT_CLOCK_ACK, 100, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3100,7 +3156,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, * 7. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); } @@ -3117,12 +3173,14 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder, static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (intel_encoder_is_c10phy(encoder)) return CX0_P2PG_STATE_DISABLE; - if (IS_BATTLEMAGE(i915) && encoder->port == PORT_A) + if ((IS_BATTLEMAGE(i915) && encoder->port == PORT_A) || + (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP)) return CX0_P2PG_STATE_DISABLE; return CX0_P4PG_STATE_DISABLE; @@ -3130,7 +3188,7 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) static void intel_cx0pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -3147,21 +3205,22 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> * to "0" to disable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); /* 4. Program DDI_CLK_VALFREQ to 0. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); /* * 5. 
Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) - drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", + drm_warn(display->drm, + "Port %c PLL not unlocked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); /* @@ -3170,9 +3229,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) */ /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK, 0); - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_FORWARD_CLOCK_UNGATE, 0); intel_cx0_phy_transaction_end(encoder, wakeref); @@ -3180,7 +3239,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); /* @@ -3191,13 +3250,14 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ - if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) - drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", + drm_warn(display->drm, + "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* @@ -3208,12 +3268,12 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) /* * 5. Program PORT CLOCK CTRL register to disable and gate clocks */ - intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, 0); /* 6. Program DDI_CLK_VALFREQ to 0. */ - intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); + intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); } void intel_mtl_pll_disable(struct intel_encoder *encoder) @@ -3230,13 +3290,15 @@ enum icl_port_dpll_id intel_mtl_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + u32 val, clock; + /* * TODO: Determine the PLL type from the SW state, once MTL PLL * handling is done via the standard shared DPLL framework. 
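Both directions of the TBT clock handling above are plain lookup tables: intel_mtl_tbt_calc_port_clock() decodes the XELPDP_DDI_CLOCK_SELECT field back into a port clock in kHz, and intel_mtl_tbt_clock_select() (which, as the hunk shows, no longer needs the i915 pointer) maps the kHz rate to the select value when enabling the PLL. A minimal standalone sketch of that shape; the tbt_sel enum is a hypothetical stand-in for the XELPDP_DDI_CLOCK_SELECT_TBT_* register values:

#include <assert.h>

enum tbt_sel { TBT_162, TBT_270, TBT_540, TBT_810 };

/* select field -> port clock in kHz, as in intel_mtl_tbt_calc_port_clock() */
static int tbt_sel_to_khz(enum tbt_sel sel)
{
        switch (sel) {
        case TBT_162: return 162000;
        case TBT_270: return 270000;
        case TBT_540: return 540000;
        case TBT_810: return 810000;
        }
        return -1;
}

/* port clock in kHz -> select field, as in intel_mtl_tbt_clock_select() */
static int khz_to_tbt_sel(int clock)
{
        switch (clock) {
        case 162000: return TBT_162;
        case 270000: return TBT_270;
        case 540000: return TBT_540;
        case 810000: return TBT_810;
        default:      return -1; /* unexpected rate; the driver flags this */
        }
}

int main(void)
{
        /* the four DP link rates round-trip through both tables */
        assert(tbt_sel_to_khz((enum tbt_sel)khz_to_tbt_sel(540000)) == 540000);
        return 0;
}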
*/ - u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port)); - u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); + clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK || clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK) @@ -3250,28 +3312,28 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state, struct intel_encoder *encoder, struct intel_c10pll_state *mpllb_hw_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10; int i; for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { u8 expected = mpllb_sw_state->pll[i]; - I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, i, - expected, mpllb_hw_state->pll[i]); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->pll[i] != expected, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, i, + expected, mpllb_hw_state->pll[i]); } - I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->tx, mpllb_hw_state->tx); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->tx != mpllb_sw_state->tx, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->tx, mpllb_hw_state->tx); - I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->cmn, mpllb_hw_state->cmn); + INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->cmn != mpllb_sw_state->cmn, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->cmn, mpllb_hw_state->cmn); } void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, @@ -3357,64 +3419,64 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state, struct intel_encoder *encoder, struct intel_c20pll_state *mpll_hw_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20; bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state); bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state); int clock = intel_c20pll_calc_port_clock(encoder, mpll_sw_state); int i; - I915_STATE_WARN(i915, mpll_hw_state->clock != clock, - "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)", - crtc->base.base.id, crtc->base.name, - mpll_sw_state->clock, mpll_hw_state->clock); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->clock != clock, + "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)", + crtc->base.base.id, crtc->base.name, + mpll_sw_state->clock, mpll_hw_state->clock); - I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb, - "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", - crtc->base.base.id, crtc->base.name, - sw_use_mpllb, 
hw_use_mpllb); + INTEL_DISPLAY_STATE_WARN(display, sw_use_mpllb != hw_use_mpllb, + "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", + crtc->base.base.id, crtc->base.name, + sw_use_mpllb, hw_use_mpllb); if (hw_use_mpllb) { for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) { - I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], - "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], + "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { - I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], - "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], + "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); } } for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) { - I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], - "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->tx[i], mpll_hw_state->tx[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], + "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->tx[i], mpll_hw_state->tx[i]); } for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) { - I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], - "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", - crtc->base.base.id, crtc->base.name, i, - mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); + INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], + "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); } } void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; struct intel_cx0pll_state mpll_hw_state = {}; - if (DISPLAY_VER(i915) < 14) + if (DISPLAY_VER(display) < 14) return; if (!new_crtc_state->hw.active) diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index 9004b99bb51f..711168882684 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -7,17 +7,15 @@ #define __INTEL_CX0_PHY_H__ #include <linux/types.h> -#include <linux/bitfield.h> -#include <linux/bits.h> enum icl_port_dpll_id; -struct drm_i915_private; struct intel_atomic_state; struct intel_c10pll_state; struct intel_c20pll_state; -struct 
intel_cx0pll_state; struct intel_crtc; struct intel_crtc_state; +struct intel_cx0pll_state; +struct intel_display; struct intel_encoder; struct intel_hdmi; @@ -35,7 +33,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_cx0pll_state *pll_state); -void intel_cx0pll_dump_hw_state(struct drm_i915_private *dev_priv, +void intel_cx0pll_dump_hw_state(struct intel_display *display, const struct intel_cx0pll_state *hw_state); void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h index ab3ae110b68f..f0e5c196eae4 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h @@ -273,13 +273,15 @@ #define _XE2HPD_C20_A_MPLLB_CFG 0xCCC2 #define _XE2HPD_C20_B_MPLLB_CFG 0xCCB6 -#define _IS_XE2HPD_C20(i915) (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) +#define _IS_XE2HPD_C20(i915) (DISPLAY_VERx100(i915) == 1401) #define PHY_C20_A_TX_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_TX_CNTX_CFG : _MTL_C20_A_TX_CNTX_CFG) - (idx)) #define PHY_C20_B_TX_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_B_TX_CNTX_CFG : _MTL_C20_B_TX_CNTX_CFG) - (idx)) #define C20_PHY_TX_RATE REG_GENMASK(2, 0) +#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0) +#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val)) #define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \ ((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx)) @@ -363,4 +365,7 @@ #define HDMI_DIV_MASK REG_GENMASK16(2, 0) #define HDMI_DIV(val) REG_FIELD_PREP16(HDMI_DIV_MASK, val) +#define PICA_PHY_CONFIG_CONTROL _MMIO(0x16FE68) +#define EDP_ON_TYPEC REG_BIT(31) + #endif /* __INTEL_CX0_REG_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b1c294236cc8..49b5cc01ce40 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -54,6 +54,7 @@ #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" @@ -2235,7 +2236,7 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, enable ? DP_FEC_READY : 0) <= 0) drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n", - enable ? "enabled" : "disabled"); + str_enabled_disabled(enable)); if (enable && drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS, @@ -2255,9 +2256,9 @@ static int read_fec_detected_status(struct drm_dp_aux *aux) return status; } -static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) +static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) { - struct drm_i915_private *i915 = to_i915(aux->drm_dev); + struct intel_display *display = to_intel_display(aux->drm_dev); int mask = enabled ? 
DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED; int status; int err; @@ -2266,57 +2267,92 @@ static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) status & mask || status < 0, 10000, 200000); - if (!err && status >= 0) - return; + if (err || status < 0) { + drm_dbg_kms(display->drm, + "Failed waiting for FEC %s to get detected: %d (status %d)\n", + str_enabled_disabled(enabled), err, status); + return err ? err : status; + } - if (err == -ETIMEDOUT) - drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n", - str_enabled_disabled(enabled)); - else - drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status); + return 0; } -void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - bool enabled) +int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int ret; if (!crtc_state->fec_enable) - return; + return 0; if (enabled) - ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), + ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_FEC_ENABLE_LIVE, 1); else - ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state), + ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_FEC_ENABLE_LIVE, 1); - if (ret) - drm_err(&i915->drm, + if (ret) { + drm_err(display->drm, "Timeout waiting for FEC live state to get %s\n", str_enabled_disabled(enabled)); - + return ret; + } /* * At least the Synaptics MST hub doesn't set the detected flag for * FEC decoding disabling so skip waiting for that.
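Note that wait_for_fec_detected() and intel_ddi_wait_for_fec_status() now propagate an error code instead of merely logging one, which is what enables the new Xe3 (DISPLAY_VER >= 30) path in intel_ddi_enable_fec() further down to retry FEC enabling a bounded number of times. A minimal sketch of that enable/verify/back-off loop, assuming a hypothetical fec_ops struct in place of the DP_TP_CTL writes and status polls:

#include <stdbool.h>

/* Hypothetical hardware hooks standing in for the DP_TP_CTL_FEC_ENABLE
 * rmw and the DP_TP_STATUS/DPCD polling done by the functions above. */
struct fec_ops {
        void (*set)(bool enable);
        int (*wait_status)(bool enabled); /* 0 once the state is live */
};

int fec_enable_with_retries(const struct fec_ops *ops, int retries)
{
        int i, ret;

        ops->set(true);
        ret = ops->wait_status(true);
        if (!ret)
                return 0;

        for (i = 0; i < retries; i++) {
                /* Back off cleanly: disable and only retry once the
                 * disabled state is confirmed, as the driver does. */
                ops->set(false);
                if (ops->wait_status(false))
                        continue;

                ops->set(true);
                ret = ops->wait_status(true);
                if (!ret)
                        return 0;
        }

        return ret; /* all retries exhausted; caller logs the failure */
}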
*/ - if (enabled) - wait_for_fec_detected(&intel_dp->aux, enabled); + if (enabled) { + ret = wait_for_fec_detected(&intel_dp->aux, enabled); + if (ret) + return ret; + } + + return 0; } static void intel_ddi_enable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + int i; + int ret; if (!crtc_state->fec_enable) return; - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), 0, DP_TP_CTL_FEC_ENABLE); + + if (DISPLAY_VER(display) < 30) + return; + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true); + if (!ret) + return; + + for (i = 0; i < 3; i++) { + drm_dbg_kms(display->drm, "Retry FEC enabling\n"); + + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_FEC_ENABLE, 0); + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, false); + if (ret) + continue; + + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + 0, DP_TP_CTL_FEC_ENABLE); + + ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true); + if (!ret) + return; + } + + drm_err(display->drm, "Failed to enable FEC after retries\n"); } static void intel_ddi_disable_fec(struct intel_encoder *encoder, @@ -3115,11 +3151,12 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *pipe_crtc; + int i; - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -3130,8 +3167,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -3382,8 +3418,9 @@ static void intel_enable_ddi(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *pipe_crtc; + int i; intel_ddi_enable_transcoder_func(encoder, crtc_state); @@ -3394,8 +3431,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_ddi_wait_for_fec_status(encoder, crtc_state, true); - for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -3477,6 +3513,13 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, drm_connector_update_privacy_screen(conn_state); } +static void intel_ddi_update_pipe_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct 
drm_connector_state *conn_state) +{ + intel_hdmi_fastset_infoframes(encoder, crtc_state, conn_state); +} + void intel_ddi_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3488,6 +3531,10 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state, intel_ddi_update_pipe_dp(state, encoder, crtc_state, conn_state); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + intel_ddi_update_pipe_hdmi(encoder, crtc_state, + conn_state); + intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state); } @@ -4391,6 +4438,7 @@ static void intel_ddi_encoder_reset(struct drm_encoder *encoder) struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); intel_pps_encoder_reset(intel_dp); @@ -4550,12 +4598,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder, enum intel_hotplug_state state; int ret; - if (intel_dp->compliance.test_active && - intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { - intel_dp_phy_test(encoder); - /* just do the PHY test and nothing else */ + if (intel_dp_test_phy(intel_dp)) return INTEL_HOTPLUG_UNCHANGED; - } state = intel_encoder_hotplug(encoder, connector); @@ -4888,7 +4932,7 @@ void intel_ddi_init(struct intel_display *display, if (!assert_has_icl_dsi(dev_priv)) return; - icl_dsi_init(dev_priv, devdata); + icl_dsi_init(display, devdata); return; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 6d85422bdefe..640851d46b1b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -63,9 +63,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); -void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - bool enabled); +int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index e881bfeafb47..bb51f974e9e2 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "intel_dsb.h" #include "intel_uncore.h" static inline struct intel_uncore *__to_uncore(struct intel_display *display) @@ -31,7 +32,7 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg) #define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__) static inline u8 -__intel_de_read8(struct intel_display *display, i915_reg_t reg) +intel_de_read8(struct intel_display *display, i915_reg_t reg) { u8 val; @@ -43,11 +44,10 @@ __intel_de_read8(struct intel_display *display, i915_reg_t reg) return val; } -#define intel_de_read8(p,...) 
__intel_de_read8(__to_intel_display(p), __VA_ARGS__) static inline u64 -__intel_de_read64_2x32(struct intel_display *display, - i915_reg_t lower_reg, i915_reg_t upper_reg) +intel_de_read64_2x32(struct intel_display *display, + i915_reg_t lower_reg, i915_reg_t upper_reg) { u64 val; @@ -62,7 +62,6 @@ __intel_de_read64_2x32(struct intel_display *display, return val; } -#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__) static inline void __intel_de_posting_read(struct intel_display *display, i915_reg_t reg) @@ -87,12 +86,11 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val) #define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__) static inline u32 -____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, - u32 clear, u32 set) +__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, + u32 clear, u32 set) { return intel_uncore_rmw(__to_uncore(display), reg, clear, set); } -#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__) static inline u32 __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, @@ -111,18 +109,17 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, #define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__) static inline int -____intel_de_wait_for_register_nowl(struct intel_display *display, - i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +__intel_de_wait_for_register_nowl(struct intel_display *display, + i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { return intel_wait_for_register(__to_uncore(display), reg, mask, value, timeout); } -#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +intel_de_wait(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { int ret; @@ -135,11 +132,10 @@ __intel_de_wait(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { int ret; @@ -152,13 +148,12 @@ __intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, - u32 mask, u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, u32 *out_value) +intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, u32 *out_value) { int ret; @@ -172,7 +167,6 @@ __intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, return ret; } -#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__) static inline int __intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, @@ -219,18 +213,25 @@ __intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) #define intel_de_write_fw(p,...) 
__intel_de_write_fw(__to_intel_display(p), __VA_ARGS__) static inline u32 -__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) +intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) { return intel_uncore_read_notrace(__to_uncore(display), reg); } -#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__) static inline void -__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, - u32 val) +intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, u32 val) { intel_uncore_write_notrace(__to_uncore(display), reg, val); } -#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__) + +static __always_inline void +intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb, + i915_reg_t reg, u32 val) +{ + if (dsb) + intel_dsb_reg_write(dsb, reg, val); + else + intel_de_write_fw(display, reg, val); +} #endif /* __INTEL_DE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b4ef4d59da1a..863927f429aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -43,9 +43,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> - -#include "gem/i915_gem_lmem.h" -#include "gem/i915_gem_object.h" +#include <drm/drm_vblank.h> #include "g4x_dp.h" #include "g4x_hdmi.h" @@ -60,6 +58,7 @@ #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" +#include "intel_bo.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" @@ -135,7 +134,8 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); -static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) @@ -253,6 +253,108 @@ static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state) return ffs(crtc_state->joiner_pipes) - 1; } +/* + * The following helper functions, despite being named for bigjoiner, + * are applicable to both bigjoiner and uncompressed joiner configurations. 
+ */ +static bool is_bigjoiner(const struct intel_crtc_state *crtc_state) +{ + return hweight8(crtc_state->joiner_pipes) >= 2; +} + +static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!is_bigjoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state)); +} + +static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!is_bigjoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return false; + + return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state); +} + +bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return false; + + return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state); +} + +u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!is_bigjoiner(crtc_state)) + return BIT(crtc->pipe); + + return bigjoiner_primary_pipes(crtc_state); +} + +u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state) +{ + return bigjoiner_secondary_pipes(crtc_state); +} + +bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state) +{ + return intel_crtc_num_joined_pipes(crtc_state) >= 4; +} + +static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!intel_crtc_is_ultrajoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return intel_crtc_is_ultrajoiner(crtc_state) && + BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state); +} + +/* + * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or + * any other logic, so let's just add a helper function to + * at least hide this hassle.
+ */ +static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state) +{ + if (!intel_crtc_is_ultrajoiner(crtc_state)) + return 0; + + return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state)); +} + +bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return intel_crtc_is_ultrajoiner(crtc_state) && + BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state); +} + u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state) { if (crtc_state->joiner_pipes) @@ -277,9 +379,9 @@ bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state) crtc->pipe == joiner_primary_pipe(crtc_state); } -static int intel_joiner_num_pipes(const struct intel_crtc_state *crtc_state) +int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state) { - return hweight8(crtc_state->joiner_pipes); + return hweight8(intel_crtc_joined_pipe_mask(crtc_state)); } u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state) @@ -291,10 +393,10 @@ u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state) struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_is_joiner_secondary(crtc_state)) - return intel_crtc_for_pipe(i915, joiner_primary_pipe(crtc_state)); + return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state)); else return to_intel_crtc(crtc_state->uapi.crtc); } @@ -320,6 +422,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) void assert_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -340,24 +443,24 @@ void assert_transcoder(struct drm_i915_private *dev_priv, cur_state = false; } - I915_STATE_WARN(dev_priv, cur_state != state, - "transcoder %s assertion failure (expected %s, current %s)\n", - transcoder_name(cpu_transcoder), str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "transcoder %s assertion failure (expected %s, current %s)\n", + transcoder_name(cpu_transcoder), str_on_off(state), + str_on_off(cur_state)); } static void assert_plane(struct intel_plane *plane, bool state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe; bool cur_state; cur_state = plane->get_hw_state(plane, &pipe); - I915_STATE_WARN(i915, cur_state != state, - "%s assertion failure (expected %s, current %s)\n", - plane->base.name, str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "%s assertion failure (expected %s, current %s)\n", + plane->base.name, str_on_off(state), + str_on_off(cur_state)); } #define assert_plane_enabled(p) assert_plane(p, true) @@ -372,7 +475,7 @@ static void assert_planes_disabled(struct intel_crtc *crtc) assert_plane_disabled(plane); } -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, +void vlv_wait_port_ready(struct intel_display *display, struct intel_digital_port *dig_port, unsigned int expected_mask) { @@ -385,11 +488,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, fallthrough; case 
PORT_B: port_mask = DPLL_PORTB_READY_MASK; - dpll_reg = DPLL(dev_priv, 0); + dpll_reg = DPLL(display, 0); break; case PORT_C: port_mask = DPLL_PORTC_READY_MASK; - dpll_reg = DPLL(dev_priv, 0); + dpll_reg = DPLL(display, 0); expected_mask <<= 4; break; case PORT_D: @@ -398,11 +501,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, break; } - if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) - drm_WARN(&dev_priv->drm, 1, + if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000)) + drm_WARN(display->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", dig_port->base.base.base.id, dig_port->base.base.name, - intel_de_read(dev_priv, dpll_reg) & port_mask, + intel_de_read(display, dpll_reg) & port_mask, expected_mask); } @@ -715,7 +818,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); intel_crtc_wait_for_next_vblank(crtc); } @@ -759,7 +862,7 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) */ if (IS_DG2(dev_priv)) tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; - else if (DISPLAY_VER(dev_priv) >= 13) + else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30)) tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; /* Wa_14010547955:dg2 */ @@ -1116,6 +1219,22 @@ static void intel_post_plane_update(struct intel_atomic_state *state, intel_encoders_audio_enable(state, crtc); } +static void intel_post_plane_update_after_readout(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ + hsw_ips_post_update(state, crtc); + + /* + * Activate DRRS after state readout to avoid + * dp_m_n vs. dp_m2_n2 confusion on BDW+. + */ + intel_drrs_activate(new_crtc_state); +} + static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -1172,8 +1291,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, * Apart from the async flip bit we want to * preserve the old state for the plane. 
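The NULL now threaded through intel_plane_disable_arm() above (and through intel_plane_async_flip() just below, then bdw_set_pipe_misc() later on) feeds the intel_de_write_dsb() helper added to intel_de.h earlier in this diff: with a DSB the register write is queued for the display state buffer to execute later, without one it goes straight to MMIO. A minimal sketch of that dispatch, with hypothetical queue_write()/mmio_write() stubs in place of intel_dsb_reg_write()/intel_de_write_fw():

#include <stdint.h>

struct dsb; /* opaque batch of register writes executed by hardware */

/* Stubs for illustration only. */
static void queue_write(struct dsb *dsb, uint32_t reg, uint32_t val)
{
        (void)dsb; (void)reg; (void)val; /* would append to the batch */
}

static void mmio_write(uint32_t reg, uint32_t val)
{
        (void)reg; (void)val; /* would poke the register immediately */
}

/* Same shape as intel_de_write_dsb(): one call site, two paths, chosen
 * by whether a DSB was handed down the call chain (NULL means "write now"). */
void write_maybe_dsb(struct dsb *dsb, uint32_t reg, uint32_t val)
{
        if (dsb)
                queue_write(dsb, reg, val); /* deferred, executed by the DSB */
        else
                mmio_write(reg, val);       /* immediate CPU MMIO write */
}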
*/ - intel_plane_async_flip(plane, old_crtc_state, - old_plane_state, false); + intel_plane_async_flip(NULL, plane, + old_crtc_state, old_plane_state, false); need_vbl_wait = true; } } @@ -1249,8 +1368,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * * WaCxSRDisabledForSpriteScaling:ivb */ - if (old_crtc_state->hw.active && - new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) + if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active && + new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv)) intel_crtc_wait_for_next_vblank(crtc); /* @@ -1315,7 +1434,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(NULL, plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -1502,14 +1621,6 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state, } } -static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - - plane->disable_arm(plane, crtc_state); -} - static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1575,11 +1686,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -1677,23 +1784,22 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta static void hsw_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; struct intel_crtc *pipe_crtc; + int i; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) - intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) + intel_dmc_enable_pipe(display, pipe_crtc->pipe); intel_encoders_pre_pll_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1703,27 +1809,25 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, 
pipe_crtc); intel_dsc_enable(pipe_crtc_state); - if (DISPLAY_VER(dev_priv) >= 13) + if (HAS_UNCOMPRESSED_JOINER(dev_priv)) intel_uncompressed_joiner_enable(pipe_crtc_state); intel_set_pipe_src_size(pipe_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(pipe_crtc_state); + bdw_set_pipe_misc(NULL, pipe_crtc_state); } if (!transcoder_is_dsi(cpu_transcoder)) hsw_configure_cpu_transcoder(new_crtc_state); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1741,12 +1845,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(pipe_crtc_state); - intel_color_commit_noarm(pipe_crtc_state); - intel_color_commit_arm(pipe_crtc_state); - /* update DSPCNTR to configure gamma/csc for pipe bottom color */ - if (DISPLAY_VER(dev_priv) < 9) - intel_disable_primary_plane(pipe_crtc_state); + intel_color_modeset(pipe_crtc_state); hsw_set_linetime_wm(pipe_crtc_state); @@ -1758,8 +1857,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_enable(state, crtc); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(new_crtc_state)) { + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); enum pipe hsw_workaround_pipe; @@ -1776,7 +1874,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { struct intel_crtc *wa_crtc = - intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + intel_crtc_for_pipe(display, hsw_workaround_pipe); intel_crtc_wait_for_next_vblank(wa_crtc); intel_crtc_wait_for_next_vblank(wa_crtc); @@ -1841,10 +1939,11 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc *pipe_crtc; + int i; /* * FIXME collapse everything to one hook. 
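Two things are worth spelling out for the joiner accounting in the hunks that follow: intel_joiner_num_pipes() becomes intel_crtc_num_joined_pipes() and counts intel_crtc_joined_pipe_mask(), which falls back to the CRTC's own pipe bit, so the result is now always at least 1 (hence the "num_pipes < 2" guards turn into "num_pipes == 1" and the "?: 1" fallback disappears); and the role helpers added above derive everything from bit patterns shifted by the primary pipe. A standalone worked example, assuming pipe A (bit 0) is the joiner primary and a simplified model of the mask fallback:

#include <assert.h>
#include <stdint.h>

static int popcount8(uint8_t v)
{
        int n = 0;

        for (; v; v &= v - 1)
                n++;
        return n;
}

/* Model of intel_crtc_num_joined_pipes(): the joined-pipe mask falls
 * back to the CRTC's own pipe bit, so the count is never 0. */
static int num_joined_pipes(uint8_t joiner_pipes, unsigned int pipe)
{
        return popcount8(joiner_pipes ? joiner_pipes : (uint8_t)(1u << pipe));
}

int main(void)
{
        uint8_t quad = 0b1111; /* pipes A-D joined, primary = pipe A */

        assert(num_joined_pipes(0, 2) == 1);      /* unjoined CRTC */
        assert(num_joined_pipes(0b0011, 0) == 2); /* bigjoiner pair */
        assert(num_joined_pipes(quad, 0) == 4);   /* ultrajoiner quad */

        /* role masks from the helpers added above, primary pipe = A */
        assert((quad & (0b01010101u << 0)) == 0b0101); /* bigjoiner primaries */
        assert((quad & (0b10101010u << 0)) == 0b1010); /* bigjoiner secondaries */
        assert((quad & (0b00010001u << 0)) == 0b0001); /* ultrajoiner primary */
        assert((quad & (0b01110111u << 0)) == 0b0111); /* need the enable bit */

        return 0;
}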
@@ -1853,8 +1952,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); - for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1863,9 +1961,8 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_post_pll_disable(state, crtc); - for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) - intel_dmc_disable_pipe(i915, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) + intel_dmc_disable_pipe(display, pipe_crtc->pipe); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -2147,11 +2244,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -2187,11 +2280,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); if (!intel_initial_watermarks(state, crtc)) intel_update_watermarks(dev_priv); @@ -2224,9 +2313,10 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) static void i9xx_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* @@ -2265,7 +2355,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, /* clock the pipe down to 640x480@60 to potentially save power */ if (IS_I830(dev_priv)) - i830_enable_pipe(dev_priv, pipe); + i830_enable_pipe(display, pipe); } void intel_encoder_destroy(struct drm_encoder *encoder) @@ -2343,9 +2433,9 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state, struct drm_display_mode *mode) { - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); - if (num_pipes < 2) + if (num_pipes == 1) return; mode->crtc_clock /= num_pipes; @@ -2407,7 +2497,7 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state drm_mode_copy(mode, pipe_mode); intel_mode_from_crtc_timings(mode, mode); mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * - (intel_joiner_num_pipes(crtc_state) ?: 1); + intel_crtc_num_joined_pipes(crtc_state); mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); /* Derive per-pipe timings in case joiner is used */ @@ -2427,10 
+2517,10 @@ void intel_encoder_get_config(struct intel_encoder *encoder, static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state) { - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); int width, height; - if (num_pipes < 2) + if (num_pipes == 1) return; width = drm_rect_width(&crtc_state->pipe_src); @@ -2520,13 +2610,29 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) return 0; } +static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + return intel_vrr_possible(crtc_state) && crtc_state->has_psr && + adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay && + IS_DISPLAY_VER(display, 13, 14); +} + static int intel_crtc_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; int ret; + /* Wa_14015401596 */ + if (intel_crtc_needs_wa_14015401596(crtc_state)) + adjusted_mode->crtc_vblank_start += 1; + ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; @@ -2887,11 +2993,11 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc, static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - int num_pipes = intel_joiner_num_pipes(crtc_state); + int num_pipes = intel_crtc_num_joined_pipes(crtc_state); enum pipe primary_pipe, pipe = crtc->pipe; int width; - if (num_pipes < 2) + if (num_pipes == 1) return; primary_pipe = joiner_primary_pipe(crtc_state); @@ -3246,9 +3352,11 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); } -static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_display *display = to_intel_display(crtc->base.dev); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = 0; @@ -3293,7 +3401,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(dev_priv)) val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; - intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); + intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); } int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) @@ -3534,23 +3642,57 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, return tmp & TRANS_DDI_FUNC_ENABLE; } -static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, - u8 *primary_pipes, u8 *secondary_pipes) +static void enabled_uncompressed_joiner_pipes(struct intel_display *display, + u8 *primary_pipes, u8 *secondary_pipes) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_crtc *crtc; + + *primary_pipes = 0; + *secondary_pipes = 0; + + if (!HAS_UNCOMPRESSED_JOINER(display)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + power_domain = POWER_DOMAIN_PIPE(pipe); + 
with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); + + if (tmp & UNCOMPRESSED_JOINER_PRIMARY) + *primary_pipes |= BIT(pipe); + if (tmp & UNCOMPRESSED_JOINER_SECONDARY) + *secondary_pipes |= BIT(pipe); + } + } +} + +static void enabled_bigjoiner_pipes(struct intel_display *display, + u8 *primary_pipes, u8 *secondary_pipes) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_crtc *crtc; *primary_pipes = 0; *secondary_pipes = 0; - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, - joiner_pipes(dev_priv)) { + if (!HAS_BIGJOINER(display)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; - power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); - with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { - u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); + with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); if (!(tmp & BIG_JOINER_ENABLE)) continue; @@ -3560,56 +3702,197 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, else *secondary_pipes |= BIT(pipe); } + } +} - if (DISPLAY_VER(dev_priv) < 13) - continue; +static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes) +{ + u8 secondary_pipes = 0; - power_domain = POWER_DOMAIN_PIPE(pipe); - with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { - u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + for (int i = 1; i < num_pipes; i++) + secondary_pipes |= primary_pipes << i; - if (tmp & UNCOMPRESSED_JOINER_PRIMARY) - *primary_pipes |= BIT(pipe); - if (tmp & UNCOMPRESSED_JOINER_SECONDARY) - *secondary_pipes |= BIT(pipe); - } - } + return secondary_pipes; +} - /* Joiner pipes should always be consecutive primary and secondary */ - drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1, - "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n", - *primary_pipes, *secondary_pipes); +static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes) +{ + return expected_secondary_pipes(uncompjoiner_primary_pipes, 2); } -static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) +static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes) { - if ((secondary_pipes & BIT(pipe)) == 0) - return pipe; + return expected_secondary_pipes(bigjoiner_primary_pipes, 2); +} - /* ignore everything above our pipe */ - primary_pipes &= ~GENMASK(7, pipe); +static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes) +{ + primary_pipes &= GENMASK(pipe, 0); - /* highest remaining bit should be our primary pipe */ - return fls(primary_pipes) - 1; + return primary_pipes ? 
BIT(fls(primary_pipes) - 1) : 0; } -static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes) +static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes) { - enum pipe primary_pipe, next_primary_pipe; + return expected_secondary_pipes(ultrajoiner_primary_pipes, 4); +} - primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes); +static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes, + u8 ultrajoiner_secondary_pipes) +{ + return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3; +} - if ((primary_pipes & BIT(primary_pipe)) == 0) - return 0; +static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915, + u8 *primary_pipes, u8 *secondary_pipes) +{ + struct intel_crtc *crtc; + + *primary_pipes = 0; + *secondary_pipes = 0; + + if (!HAS_ULTRAJOINER(i915)) + return; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, + joiner_pipes(i915)) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe); + with_intel_display_power_if_enabled(i915, power_domain, wakeref) { + u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); + + if (!(tmp & ULTRA_JOINER_ENABLE)) + continue; + + if (tmp & PRIMARY_ULTRA_JOINER_ENABLE) + *primary_pipes |= BIT(pipe); + else + *secondary_pipes |= BIT(pipe); + } + } +} + +static void enabled_joiner_pipes(struct drm_i915_private *dev_priv, + enum pipe pipe, + u8 *primary_pipe, u8 *secondary_pipes) +{ + struct intel_display *display = to_intel_display(&dev_priv->drm); + u8 primary_ultrajoiner_pipes; + u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes; + u8 secondary_ultrajoiner_pipes; + u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes; + u8 ultrajoiner_pipes; + u8 uncompressed_joiner_pipes, bigjoiner_pipes; + + enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes, + &secondary_ultrajoiner_pipes); + /* + * For some strange reason the last pipe in the set of four + * shouldn't have ultrajoiner enable bit set in hardware. + * Set the bit anyway to make life easier. 
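The bit arithmetic above is easiest to follow with concrete pipes. Suppose pipe A leads a four-pipe ultrajoiner: hardware reports only pipes B and C as secondaries (the last pipe lacks the enable bit), and the fixup reconstructs the full set. A worked example using only the helpers from this hunk:

	u8 primary = BIT(PIPE_A);			/* 0b0001 */
	u8 secondaries = BIT(PIPE_B) | BIT(PIPE_C);	/* 0b0110, as read from hardware */

	/* matches the drm_WARN_ON() check below: (1 << 1) | (1 << 2) == 0b0110 */
	WARN_ON(expected_secondary_pipes(primary, 3) != secondaries);

	/* ORs in primary << 3 = 0b1000, yielding 0b1110: pipes B, C and D */
	secondaries = fixup_ultrajoiner_secondary_pipes(primary, secondaries);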
+ */ + drm_WARN_ON(&dev_priv->drm, + expected_secondary_pipes(primary_ultrajoiner_pipes, 3) != + secondary_ultrajoiner_pipes); + secondary_ultrajoiner_pipes = + fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes, + secondary_ultrajoiner_pipes); + + drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0); + + enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes, + &secondary_uncompressed_joiner_pipes); + + drm_WARN_ON(display->drm, + (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0); + + enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes, + &secondary_bigjoiner_pipes); + + drm_WARN_ON(display->drm, + (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0); + + ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes; + uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes | + secondary_uncompressed_joiner_pipes; + bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes; + + drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes, + "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n", + ultrajoiner_pipes, bigjoiner_pipes); + + drm_WARN(display->drm, secondary_ultrajoiner_pipes != + expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), + "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n", + expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes), + secondary_ultrajoiner_pipes); + + drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0, + "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n", + uncompressed_joiner_pipes, bigjoiner_pipes); + + drm_WARN(display->drm, secondary_bigjoiner_pipes != + expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), + "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n", + expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes), + secondary_bigjoiner_pipes); + + drm_WARN(display->drm, secondary_uncompressed_joiner_pipes != + expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), + "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n", + expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes), + secondary_uncompressed_joiner_pipes); + + *primary_pipe = 0; + *secondary_pipes = 0; + + if (ultrajoiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes); + *secondary_pipes = secondary_ultrajoiner_pipes & + expected_ultrajoiner_secondary_pipes(*primary_pipe); + + drm_WARN(display->drm, + expected_ultrajoiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_ultrajoiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } + + if (uncompressed_joiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes); + *secondary_pipes = secondary_uncompressed_joiner_pipes & + expected_uncompressed_joiner_secondary_pipes(*primary_pipe); + + drm_WARN(display->drm, + expected_uncompressed_joiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_uncompressed_joiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } - /* ignore our primary pipe and everything below it */ - 
primary_pipes &= ~GENMASK(primary_pipe, 0); - /* make sure a high bit is set for the ffs() */ - primary_pipes |= BIT(7); - /* lowest remaining bit should be the next primary pipe */ - next_primary_pipe = ffs(primary_pipes) - 1; + if (bigjoiner_pipes & BIT(pipe)) { + *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes); + *secondary_pipes = secondary_bigjoiner_pipes & + expected_bigjoiner_secondary_pipes(*primary_pipe); - return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe); + drm_WARN(display->drm, + expected_bigjoiner_secondary_pipes(*primary_pipe) != + *secondary_pipes, + "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n", + *primary_pipe, + expected_bigjoiner_secondary_pipes(*primary_pipe), + *secondary_pipes); + return; + } } static u8 hsw_panel_transcoders(struct drm_i915_private *i915) @@ -3628,7 +3911,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); enum transcoder cpu_transcoder; - u8 primary_pipes, secondary_pipes; + u8 primary_pipe, secondary_pipes; u8 enabled_transcoders = 0; /* @@ -3681,10 +3964,9 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) enabled_transcoders |= BIT(cpu_transcoder); /* joiner secondary -> consider the primary pipe's transcoder as well */ - enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes); + enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes); if (secondary_pipes & BIT(crtc->pipe)) { - cpu_transcoder = (enum transcoder) - get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes); + cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1; if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); } @@ -3815,17 +4097,15 @@ static void intel_joiner_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - u8 primary_pipes, secondary_pipes; + u8 primary_pipe, secondary_pipes; enum pipe pipe = crtc->pipe; - enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes); + enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes); - if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0) + if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0) return; - crtc_state->joiner_pipes = - BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) | - get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes); + crtc_state->joiner_pipes = primary_pipe | secondary_pipes; } static bool hsw_get_pipe_config(struct intel_crtc *crtc, @@ -3986,7 +4266,7 @@ int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc_state *crtc_state; struct drm_display_mode *mode; struct intel_crtc *crtc; @@ -3995,7 +4275,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!encoder->get_hw_state(encoder, &pipe)) return NULL; - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -4285,22 +4565,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (ret) return ret; - ret = 
intel_compute_pipe_wm(state, crtc); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "Target pipe watermarks are invalid\n"); - return ret; - } - - /* - * Calculate 'intermediate' watermarks that satisfy both the - * old state and the new state. We can program these - * immediately. - */ - ret = intel_compute_intermediate_wm(state, crtc); + ret = intel_wm_compute(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, - "No valid intermediate pipe watermarks are possible\n"); + "[CRTC:%d:%s] watermarks are invalid\n", + crtc->base.base.id, crtc->base.name); return ret; } @@ -4798,6 +5067,8 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state, struct drm_connector *connector; int i; + intel_vrr_compute_config_late(crtc_state); + for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = @@ -5035,15 +5306,15 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset, const struct intel_cx0pll_state *a, const struct intel_cx0pll_state *b) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); char *chipname = a->use_c10 ? "C10" : "C20"; pipe_config_mismatch(p, fastset, crtc, name, chipname); drm_printf(p, "expected:\n"); - intel_cx0pll_dump_hw_state(i915, a); + intel_cx0pll_dump_hw_state(display, a); drm_printf(p, "found:\n"); - intel_cx0pll_dump_hw_state(i915, b); + intel_cx0pll_dump_hw_state(display, b); } bool @@ -5431,7 +5702,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_INFOFRAME(avi); PIPE_CONF_CHECK_INFOFRAME(spd); PIPE_CONF_CHECK_INFOFRAME(hdmi); - PIPE_CONF_CHECK_INFOFRAME(drm); + if (!fastset) + PIPE_CONF_CHECK_INFOFRAME(drm); PIPE_CONF_CHECK_DP_VSC_SDP(vsc); PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); @@ -6732,17 +7004,12 @@ int intel_atomic_check(struct drm_device *dev, static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { - struct intel_crtc_state __maybe_unused *crtc_state; - struct intel_crtc *crtc; - int i, ret; + int ret; ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); if (ret < 0) return ret; - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - intel_color_prepare_commit(state, crtc); - return 0; } @@ -6823,12 +7090,12 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, * During modesets pipe configuration was programmed as the * CRTC was enabled. 
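The two-phase watermark computation at this call site becomes a single intel_wm_compute(), matching the compute_watermarks hook that replaces compute_pipe_wm/compute_intermediate_wm in struct intel_wm_funcs further down. A sketch of how a platform implementation could fold the two phases, with hypothetical helper names standing in for the old pair of hooks:

static int ilk_compute_watermarks(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	int ret;

	/* target watermarks for the new state */
	ret = ilk_compute_pipe_wm(state, crtc);
	if (ret)
		return ret;

	/*
	 * 'intermediate' watermarks satisfying both the old and the new
	 * state, so they can be programmed immediately
	 */
	return ilk_compute_intermediate_wm(state, crtc);
}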
*/ - if (!modeset) { + if (!modeset && !new_crtc_state->use_dsb) { if (intel_crtc_needs_color_update(new_crtc_state)) - intel_color_commit_arm(new_crtc_state); + intel_color_commit_arm(NULL, new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(new_crtc_state); + bdw_set_pipe_misc(NULL, new_crtc_state); if (intel_crtc_needs_fastset(new_crtc_state)) intel_pipe_fastset(old_crtc_state, new_crtc_state); @@ -6925,10 +7192,12 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); if (!modeset && - intel_crtc_needs_color_update(new_crtc_state)) - intel_color_commit_noarm(new_crtc_state); + intel_crtc_needs_color_update(new_crtc_state) && + !new_crtc_state->use_dsb) + intel_color_commit_noarm(NULL, new_crtc_state); - intel_crtc_planes_update_noarm(state, crtc); + if (!new_crtc_state->use_dsb) + intel_crtc_planes_update_noarm(NULL, state, crtc); } static void intel_update_crtc(struct intel_atomic_state *state, @@ -6939,16 +7208,25 @@ static void intel_update_crtc(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - /* Perform vblank evasion around commit operation */ - intel_pipe_update_start(state, crtc); + if (new_crtc_state->use_dsb) { + intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event); - commit_pipe_pre_planes(state, crtc); + intel_dsb_commit(new_crtc_state->dsb_commit, false); + } else { + /* Perform vblank evasion around commit operation */ + intel_pipe_update_start(state, crtc); - intel_crtc_planes_update_arm(state, crtc); + if (new_crtc_state->dsb_commit) + intel_dsb_commit(new_crtc_state->dsb_commit, false); - commit_pipe_post_planes(state, crtc); + commit_pipe_pre_planes(state, crtc); - intel_pipe_update_end(state, crtc); + intel_crtc_planes_update_arm(NULL, state, crtc); + + commit_pipe_post_planes(state, crtc); + + intel_pipe_update_end(state, crtc); + } /* * VRR/Seamless M/N update may need to update frame timings. @@ -7273,6 +7551,24 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat } } +static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->dsb_commit) + intel_dsb_wait(crtc_state->dsb_commit); + + intel_color_wait_commit(crtc_state); +} + +static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->dsb_commit) { + intel_dsb_cleanup(crtc_state->dsb_commit); + crtc_state->dsb_commit = NULL; + } + + intel_color_cleanup_commit(crtc_state); +} + static void intel_atomic_cleanup_work(struct work_struct *work) { struct intel_atomic_state *state = @@ -7283,7 +7579,7 @@ static void intel_atomic_cleanup_work(struct work_struct *work) int i; for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) - intel_color_cleanup_commit(old_crtc_state); + intel_atomic_dsb_cleanup(old_crtc_state); drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); drm_atomic_helper_commit_cleanup_done(&state->base); @@ -7324,15 +7620,93 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s * caller made sure that the object is synced wrt. the related color clear value * GPU write on it. 
*/ - ret = i915_gem_object_read_from_page(intel_fb_obj(fb), - fb->offsets[cc_plane] + 16, - &plane_state->ccval, - sizeof(plane_state->ccval)); + ret = intel_bo_read_from_page(intel_fb_bo(fb), + fb->offsets[cc_plane] + 16, + &plane_state->ccval, + sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ drm_WARN_ON(&i915->drm, ret); } } +static void intel_atomic_dsb_prepare(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + intel_color_prepare_commit(state, crtc); +} + +static void intel_atomic_dsb_finish(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!new_crtc_state->hw.active) + return; + + if (state->base.legacy_cursor_update) + return; + + /* FIXME deal with everything */ + new_crtc_state->use_dsb = + new_crtc_state->update_planes && + !new_crtc_state->vrr.enable && + !new_crtc_state->do_async_flip && + !new_crtc_state->has_psr && + !new_crtc_state->scaler_state.scaler_users && + !old_crtc_state->scaler_state.scaler_users && + !intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state); + + if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank) + return; + + /* + * Rough estimate: + * ~64 registers per each plane * 8 planes = 512 + * Double that for pipe stuff and other overhead. + */ + new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, + new_crtc_state->use_dsb ? 1024 : 16); + if (!new_crtc_state->dsb_commit) { + new_crtc_state->use_dsb = false; + intel_color_cleanup_commit(new_crtc_state); + return; + } + + if (new_crtc_state->use_dsb) { + if (intel_crtc_needs_color_update(new_crtc_state)) + intel_color_commit_noarm(new_crtc_state->dsb_commit, + new_crtc_state); + intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit, + state, crtc); + + intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); + + if (intel_crtc_needs_color_update(new_crtc_state)) + intel_color_commit_arm(new_crtc_state->dsb_commit, + new_crtc_state); + bdw_set_pipe_misc(new_crtc_state->dsb_commit, + new_crtc_state); + intel_crtc_planes_update_arm(new_crtc_state->dsb_commit, + state, crtc); + + if (!new_crtc_state->dsb_color_vblank) { + intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1); + intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit); + intel_dsb_interrupt(new_crtc_state->dsb_commit); + } + } + + if (new_crtc_state->dsb_color_vblank) + intel_dsb_chain(state, new_crtc_state->dsb_commit, + new_crtc_state->dsb_color_vblank, true); + + intel_dsb_finish(new_crtc_state->dsb_commit); +} + static void intel_atomic_commit_tail(struct intel_atomic_state *state) { struct drm_device *dev = state->base.dev; @@ -7340,13 +7714,21 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; int i; + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_atomic_dsb_prepare(state, crtc); + intel_atomic_commit_fence_wait(state); intel_td_flush(dev_priv); + intel_atomic_prepare_plane_clear_colors(state); + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_atomic_dsb_finish(state, crtc); + 
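The 1024 passed to intel_dsb_prepare() is just the in-code estimate spelled out: roughly 64 register writes per plane times 8 planes gives 512, doubled for pipe registers and other overhead. The new DSB entry points pair up across the commit; a condensed map of the lifecycle, listing only functions from this patch:

#define EXAMPLE_DSB_COMMIT_SIZE (64 * 8 * 2)	/* hypothetical name; value from the comment above */

/*
 * intel_atomic_dsb_prepare()     - color commit state, before the fence wait
 * intel_atomic_dsb_finish()      - decide use_dsb, record noarm/arm writes
 * intel_dsb_commit()             - execute, from intel_update_crtc()
 * intel_atomic_dsb_wait_commit() - drain the buffer after the flip
 * intel_atomic_dsb_cleanup()     - free, from intel_atomic_cleanup_work()
 */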
drm_atomic_helper_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base); intel_atomic_global_state_wait_for_dependencies(state); @@ -7380,8 +7762,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); - intel_atomic_prepare_plane_clear_colors(state); - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state) || @@ -7462,7 +7842,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (new_crtc_state->do_async_flip) intel_crtc_disable_flip_done(state, crtc); - intel_color_wait_commit(new_crtc_state); + intel_atomic_dsb_wait_commit(new_crtc_state); } /* @@ -7497,14 +7877,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_modeset_verify_crtc(state, crtc); - /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ - hsw_ips_post_update(state, crtc); - - /* - * Activate DRRS after state readout to avoid - * dp_m_n vs. dp_m2_n2 confusion on BDW+. - */ - intel_drrs_activate(new_crtc_state); + intel_post_plane_update_after_readout(state, crtc); /* * DSB cleanup is done in cleanup_work aligning with framebuffer @@ -7514,7 +7887,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * FIXME get rid of this funny new->old swapping */ old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank); - old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit); + old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit); } /* Underruns don't always raise interrupts, so check manually */ @@ -7661,13 +8034,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, ret = intel_atomic_swap_state(state); if (ret) { - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) - intel_color_cleanup_commit(new_crtc_state); - drm_atomic_helper_unprepare_planes(dev, &state->base); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; @@ -7702,23 +8068,6 @@ void intel_plane_destroy(struct drm_plane *plane) kfree(to_intel_plane(plane)); } -int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; - struct drm_crtc *drmmode_crtc; - struct intel_crtc *crtc; - - drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); - if (!drmmode_crtc) - return -ENOENT; - - crtc = to_intel_crtc(drmmode_crtc); - pipe_from_crtc_id->pipe = crtc->pipe; - - return 0; -} - static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -7800,7 +8149,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (HAS_DDI(dev_priv)) { if (intel_ddi_crt_present(dev_priv)) - intel_crt_init(dev_priv); + intel_crt_init(display); intel_bios_for_each_encoder(display, intel_ddi_init); @@ -7815,7 +8164,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) * incorrect sharing of the PPS. 
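intel_crt_init() taking struct intel_display * follows the conversion idiom used throughout this series: display code receives the display pointer and derives the i915 pointer only where legacy callees still need it. The shape of the idiom, with a hypothetical function name wrapping two calls taken from the hunks below:

static void example_display_hook(struct intel_display *display)
{
	/* derive the legacy pointer only where a callee still needs it */
	struct drm_i915_private *i915 = to_i915(display->drm);

	intel_lvds_init(i915);		/* legacy: still takes i915 */
	intel_crt_init(display);	/* converted: takes display */
}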
*/ intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); @@ -7846,7 +8195,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) bool has_edp, has_port; if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) - intel_crt_init(dev_priv); + intel_crt_init(display); /* * The DP_DETECTED bit is the latched state of the DDC @@ -7892,14 +8241,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) vlv_dsi_init(dev_priv); } else if (IS_PINEVIEW(dev_priv)) { intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { bool found = false; if (IS_MOBILE(dev_priv)) intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); @@ -7941,7 +8290,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); - intel_crt_init(dev_priv); + intel_crt_init(display); intel_dvo_init(dev_priv); } @@ -7961,8 +8310,9 @@ static int max_dotclock(struct drm_i915_private *i915) { int max_dotclock = i915->display.cdclk.max_dotclk_freq; - /* icl+ might use joiner */ - if (DISPLAY_VER(i915) >= 11) + if (HAS_ULTRAJOINER(i915)) + max_dotclock *= 4; + else if (HAS_UNCOMPRESSED_JOINER(i915) || HAS_BIGJOINER(i915)) max_dotclock *= 2; return max_dotclock; @@ -8086,7 +8436,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, const struct drm_display_mode *mode, - bool joiner) + int num_joined_pipes) { int plane_width_max, plane_height_max; @@ -8102,8 +8452,11 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, * plane so let's not advertize modes that are * too big for that. 
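max_dotclock() now scales by how many pipes the hardware can actually gang rather than a blanket icl+ doubling. With an illustrative single-pipe limit of 1350000 kHz (an assumed base, not a value from this patch), the new branches work out as follows:

	int max_dotclk = 1350000;	/* kHz; assumed base for illustration */

	if (HAS_ULTRAJOINER(i915))
		max_dotclk *= 4;	/* four joined pipes -> 5400000 kHz */
	else if (HAS_UNCOMPRESSED_JOINER(i915) || HAS_BIGJOINER(i915))
		max_dotclk *= 2;	/* two joined pipes -> 2700000 kHz */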
*/ - if (DISPLAY_VER(dev_priv) >= 11) { - plane_width_max = 5120 << joiner; + if (DISPLAY_VER(dev_priv) >= 30) { + plane_width_max = 6144 * num_joined_pipes; + plane_height_max = 4800; + } else if (DISPLAY_VER(dev_priv) >= 11) { + plane_width_max = 5120 * num_joined_pipes; plane_height_max = 4320; } else { plane_width_max = 5120; @@ -8255,9 +8608,9 @@ out: return ret; } -void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +void i830_enable_pipe(struct intel_display *display, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); enum transcoder cpu_transcoder = (enum transcoder)pipe; /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { @@ -8270,10 +8623,10 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) u32 dpll, fp; int i; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, i9xx_calc_dpll_params(48000, &clock) != 25154); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", pipe_name(pipe), clock.vco, clock.dot); @@ -8285,35 +8638,35 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder), HACTIVE(640 - 1) | HTOTAL(800 - 1)); - intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder), HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); - intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder), HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); - intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), VACTIVE(480 - 1) | VTOTAL(525 - 1)); - intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder), VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); - intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder), VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); - intel_de_write(dev_priv, PIPESRC(dev_priv, pipe), + intel_de_write(display, PIPESRC(display, pipe), PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); - intel_de_write(dev_priv, FP0(pipe), fp); - intel_de_write(dev_priv, FP1(pipe), fp); + intel_de_write(display, FP0(pipe), fp); + intel_de_write(display, FP1(pipe), fp); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), + intel_de_write(display, DPLL(display, pipe), dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); + intel_de_write(display, DPLL(display, pipe), dpll); /* Wait for the clocks to stabilize. */ - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); /* The pixel multiplier can only be updated once the @@ -8321,46 +8674,46 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) * * So write it again. 
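With intel_mode_valid_max_plane_size() taking a pipe count, the advertised plane width scales linearly instead of the old single doubling: 5120 << joiner could only reach 10240, whereas 5120 * num_joined_pipes reaches 20480 with a four-pipe ultrajoiner, and 24576 at 6144 per pipe on display version 30+. The mapping, restated as a standalone helper with a hypothetical name:

static int max_plane_width(int display_ver, int num_joined_pipes)
{
	if (display_ver >= 30)
		return 6144 * num_joined_pipes;	/* 24576 with 4 pipes */
	if (display_ver >= 11)
		return 5120 * num_joined_pipes;	/* 20480 with 4 pipes */
	return 5120;				/* no joiner support */
}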
*/ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); + intel_de_write(display, DPLL(display, pipe), dpll); /* We do this three times for luck */ for (i = 0; i < 3 ; i++) { - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), dpll); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), TRANSCONF_ENABLE); - intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe)); + intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE); + intel_de_posting_read(display, TRANSCONF(display, pipe)); intel_wait_for_pipe_scanline_moving(crtc); } -void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +void i830_disable_pipe(struct intel_display *display, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); - drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", + drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_A)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_B)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_C)) & DISP_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & MCURSOR_MODE_MASK); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_B)) & MCURSOR_MODE_MASK); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE); + drm_WARN_ON(display->drm, + intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK); + drm_WARN_ON(display->drm, + intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK); - intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), 0); - intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe)); + intel_de_write(display, TRANSCONF(display, pipe), 0); + intel_de_posting_read(display, TRANSCONF(display, pipe)); intel_wait_for_pipe_scanline_stopped(crtc); - intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS); + intel_de_posting_read(display, DPLL(display, pipe)); } void intel_hpd_poll_fini(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b21d9578d5db..caef04f655c5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -40,7 +40,6 @@ struct drm_encoder; struct drm_file; struct drm_format_info; struct drm_framebuffer; -struct drm_i915_gem_object; struct drm_i915_private; struct drm_mode_fb_cmd2; struct drm_modeset_acquire_ctx; @@ -52,6 +51,7 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_digital_port; +struct intel_display; struct intel_dp; struct intel_encoder; struct intel_initial_plane_config; @@ -94,16 +94,6 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder) return transcoder == 
TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; } -/* - * Global legacy plane identifier. Valid only for primary/sprite - * planes on pre-g4x, and only for primary planes on g4x-bdw. - */ -enum i9xx_plane_id { - PLANE_A, - PLANE_B, - PLANE_C, -}; - #define plane_name(p) ((p) + 'A') #define for_each_plane_id_on_crtc(__crtc, __p) \ @@ -401,6 +391,30 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +#define for_each_crtc_in_masks(display, crtc, first_pipes, second_pipes, i) \ + for ((i) = 0; \ + (i) < (I915_MAX_PIPES * 2) && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \ + (i)++) \ + for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i)) + +#define for_each_crtc_in_masks_reverse(display, crtc, first_pipes, second_pipes, i) \ + for ((i) = (I915_MAX_PIPES * 2 - 1); \ + (i) >= 0 && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \ + (i)--) \ + for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i)) + +#define for_each_pipe_crtc_modeset_disable(display, crtc, crtc_state, i) \ + for_each_crtc_in_masks(display, crtc, \ + _intel_modeset_primary_pipes(crtc_state), \ + _intel_modeset_secondary_pipes(crtc_state), \ + i) + +#define for_each_pipe_crtc_modeset_enable(display, crtc, crtc_state, i) \ + for_each_crtc_in_masks_reverse(display, crtc, \ + _intel_modeset_primary_pipes(crtc_state), \ + _intel_modeset_secondary_pipes(crtc_state), \ + i) + int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int intel_atomic_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -415,7 +429,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, const struct drm_display_mode *mode, - bool joiner); + int num_joined_pipes); enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915, const struct drm_display_mode *mode); @@ -425,7 +439,14 @@ bool is_trans_port_sync_master(const struct intel_crtc_state *state); u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state); +bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state); u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state); +u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state); +u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state); struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state); bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state); bool intel_pipe_config_compare(const struct intel_crtc_state *current_config, @@ -437,8 +458,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); void intel_enable_transcoder(const struct 
intel_crtc_state *new_crtc_state); void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state); -void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); -void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); +void i830_enable_pipe(struct intel_display *display, enum pipe pipe); +void i830_disable_pipe(struct intel_display *display, enum pipe pipe); int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); @@ -470,16 +491,10 @@ bool intel_encoder_is_snps(struct intel_encoder *encoder); bool intel_encoder_is_tc(struct intel_encoder *encoder); enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder); -int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, +void vlv_wait_port_ready(struct intel_display *display, struct intel_digital_port *dig_port, unsigned int expected_mask); -struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd); bool intel_fuzzy_clock_check(int clock1, int clock2); @@ -570,21 +585,21 @@ void assert_transcoder(struct drm_i915_private *dev_priv, bool assert_port_valid(struct drm_i915_private *i915, enum port port); /* - * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity - * checks to check for unexpected conditions which may not necessarily be a user - * visible problem. This will either WARN() or DRM_ERROR() depending on the - * verbose_state_checks module param, to enable distros and users to tailor - * their preferred amount of i915 abrt spam. + * Use INTEL_DISPLAY_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw + * state sanity checks to check for unexpected conditions which may not + * necessarily be a user visible problem. This will either drm_WARN() or + * drm_err() depending on the verbose_state_checks module param, to enable + * distros and users to tailor their preferred amount of i915 abrt spam. */ -#define I915_STATE_WARN(__i915, condition, format...) ({ \ - struct drm_device *drm = &(__i915)->drm; \ +#define INTEL_DISPLAY_STATE_WARN(__display, condition, format...) 
({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \ - drm_err(drm, format); \ + if (!drm_WARN((__display)->drm, (__display)->params.verbose_state_checks, format)) \ + drm_err((__display)->drm, format); \ unlikely(__ret_warn_on); \ }) bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915); +int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 0a711114ff2b..45b7c6900adc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -81,10 +81,8 @@ struct intel_display_funcs { struct intel_wm_funcs { /* update_wm is for legacy wm management */ void (*update_wm)(struct drm_i915_private *dev_priv); - int (*compute_pipe_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_intermediate_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); + int (*compute_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); void (*initial_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*atomic_update_watermarks)(struct intel_atomic_state *state, @@ -286,6 +284,9 @@ struct intel_display { /* drm device backpointer */ struct drm_device *drm; + /* Platform (and subplatform, if any) identification */ + struct intel_display_platforms platform; + /* Display functions */ struct { /* Top level crtc-ish functions */ @@ -457,6 +458,10 @@ struct intel_display { /* For i915gm/i945gm vblank irq workaround */ u8 vblank_enabled; + int vblank_wa_num_pipes; + + struct work_struct vblank_dc_work; + u32 de_irq_mask[I915_MAX_PIPES]; u32 pipestat_irq_mask[I915_MAX_PIPES]; } irq; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f5f618199d39..11aff485d8fa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -3,6 +3,7 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/debugfs.h> #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> @@ -10,13 +11,13 @@ #include <drm/drm_fourcc.h> #include "hsw_ips.h" -#include "i915_debugfs.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_alpm.h" +#include "intel_bo.h" #include "intel_crtc.h" -#include "intel_de.h" #include "intel_crtc_state_dump.h" +#include "intel_de.h" #include "intel_display_debugfs.h" #include "intel_display_debugfs_params.h" #include "intel_display_power.h" @@ -26,7 +27,9 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_drrs.h" +#include "intel_fb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_hdcp.h" @@ -39,11 +42,28 @@ #include "intel_vdsc.h" #include "intel_wm.h" +static struct intel_display *node_to_intel_display(struct drm_info_node *node) +{ + return to_intel_display(node->minor->dev); +} + static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { return to_i915(node->minor->dev); } +static int intel_display_caps(struct seq_file *m, void *data) +{ + struct intel_display *display = node_to_intel_display(m->private); + struct drm_printer p = drm_seq_file_printer(m); + + intel_display_device_info_print(DISPLAY_INFO(display), + DISPLAY_RUNTIME_INFO(display), &p); + 
intel_display_params_dump(&display->params, display->drm->driver->name, &p); + + return 0; +} + static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -106,7 +126,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fbdev_fb->base.format->cpp[0] * 8, fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); - i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); + intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base)); seq_putc(m, '\n'); } #endif @@ -124,7 +144,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.format->cpp[0] * 8, fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); - i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); + intel_bo_describe(m, intel_fb_bo(&fb->base)); seq_putc(m, '\n'); } mutex_unlock(&dev_priv->drm.mode_config.fb_lock); @@ -424,7 +444,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) int num_scalers = crtc->num_scalers; int i; - /* Not all platformas have a scaler */ + /* Not all platforms have a scaler */ if (num_scalers) { seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d", num_scalers, @@ -773,198 +793,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) return 0; } -static ssize_t i915_displayport_test_active_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - char *input_buffer; - int status = 0; - struct drm_device *dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - int val = 0; - - dev = ((struct seq_file *)file->private_data)->private; - - if (len == 0) - return 0; - - input_buffer = memdup_user_nul(ubuf, len); - if (IS_ERR(input_buffer)) - return PTR_ERR(input_buffer); - - drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len); - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - status = kstrtoint(input_buffer, 10, &val); - if (status < 0) - break; - drm_dbg(dev, "Got %d for test active\n", val); - /* To prevent erroneous activation of the compliance - * testing code, only accept an actual value of 1 here - */ - if (val == 1) - intel_dp->compliance.test_active = true; - else - intel_dp->compliance.test_active = false; - } - } - drm_connector_list_iter_end(&conn_iter); - kfree(input_buffer); - if (status < 0) - return status; - - *offp += len; - return len; -} - -static int i915_displayport_test_active_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - 
intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_active) - seq_puts(m, "1"); - else - seq_puts(m, "0"); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} - -static int i915_displayport_test_active_open(struct inode *inode, - struct file *file) -{ - return single_open(file, i915_displayport_test_active_show, - inode->i_private); -} - -static const struct file_operations i915_displayport_test_active_fops = { - .owner = THIS_MODULE, - .open = i915_displayport_test_active_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_displayport_test_active_write -}; - -static int i915_displayport_test_data_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_type == - DP_TEST_LINK_EDID_READ) - seq_printf(m, "%lx", - intel_dp->compliance.test_data.edid); - else if (intel_dp->compliance.test_type == - DP_TEST_LINK_VIDEO_PATTERN) { - seq_printf(m, "hdisplay: %d\n", - intel_dp->compliance.test_data.hdisplay); - seq_printf(m, "vdisplay: %d\n", - intel_dp->compliance.test_data.vdisplay); - seq_printf(m, "bpc: %u\n", - intel_dp->compliance.test_data.bpc); - } else if (intel_dp->compliance.test_type == - DP_TEST_LINK_PHY_TEST_PATTERN) { - seq_printf(m, "pattern: %d\n", - intel_dp->compliance.test_data.phytest.phy_pattern); - seq_printf(m, "Number of lanes: %d\n", - intel_dp->compliance.test_data.phytest.num_lanes); - seq_printf(m, "Link Rate: %d\n", - intel_dp->compliance.test_data.phytest.link_rate); - seq_printf(m, "level: %02x\n", - intel_dp->train_set[0]); - } - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); - -static int i915_displayport_test_type_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); - static ssize_t i915_fifo_underrun_reset_write(struct file *filp, const char __user *ubuf, @@ -1025,6 +853,7 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { }; static const struct drm_info_list intel_display_debugfs_list[] = { + {"intel_display_caps", 
intel_display_caps, 0}, {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, @@ -1037,37 +866,22 @@ static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_lpsp_status", i915_lpsp_status, 0}, }; -static const struct { - const char *name; - const struct file_operations *fops; -} intel_display_debugfs_files[] = { - {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_dp_test_data", &i915_displayport_test_data_fops}, - {"i915_dp_test_type", &i915_displayport_test_type_fops}, - {"i915_dp_test_active", &i915_displayport_test_active_fops}, -}; - void intel_display_debugfs_register(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; struct drm_minor *minor = i915->drm.primary; - int i; - for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { - debugfs_create_file(intel_display_debugfs_files[i].name, - 0644, - minor->debugfs_root, - to_i915(minor->dev), - intel_display_debugfs_files[i].fops); - } + debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root, + to_i915(minor->dev), &i915_fifo_underrun_reset_ops); drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); intel_bios_debugfs_register(display); - intel_cdclk_debugfs_register(i915); - intel_dmc_debugfs_register(i915); + intel_cdclk_debugfs_register(display); + intel_dmc_debugfs_register(display); + intel_dp_test_debugfs_register(display); intel_fbc_debugfs_register(display); intel_hpd_debugfs_register(i915); intel_opregion_debugfs_register(display); @@ -1502,6 +1316,68 @@ static int intel_crtc_pipe_show(struct seq_file *m, void *unused) } DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe); +static int i915_joiner_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + + seq_printf(m, "%d\n", connector->force_joined_pipes); + + return 0; +} + +static ssize_t i915_joiner_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + int force_joined_pipes = 0; + int ret; + + if (len == 0) + return 0; + + ret = kstrtoint_from_user(ubuf, len, 0, &force_joined_pipes); + if (ret < 0) + return ret; + + switch (force_joined_pipes) { + case 0: + case 1: + case 2: + connector->force_joined_pipes = force_joined_pipes; + break; + case 4: + if (HAS_ULTRAJOINER(i915)) { + connector->force_joined_pipes = force_joined_pipes; + break; + } + + fallthrough; + default: + return -EINVAL; + } + + *offp += len; + + return len; +} + +static int i915_joiner_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_joiner_show, inode->i_private); +} + +static const struct file_operations i915_joiner_fops = { + .owner = THIS_MODULE, + .open = i915_joiner_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_joiner_write +}; + /** * intel_connector_debugfs_add - add i915 specific connector debugfs files * @connector: pointer to a registered intel_connector @@ -1548,11 +1424,11 @@ void intel_connector_debugfs_add(struct intel_connector *connector) connector, &i915_dsc_fractional_bpp_fops); } - if (DISPLAY_VER(i915) >= 11 && - (connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector_type == DRM_MODE_CONNECTOR_eDP)) { - 
debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root, - &connector->force_bigjoiner_enable); + if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) && + intel_dp_has_joiner(intel_attached_dp(connector))) { + debugfs_create_file("i915_joiner_force_enable", 0644, root, + connector, &i915_joiner_fops); } if (connector_type == DRM_MODE_CONNECTOR_DSI || diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 1b46ba985580..5f98e1b2a401 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -3,12 +3,13 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include <drm/drm_color_mgmt.h> #include <linux/pci.h> #include "i915_drv.h" #include "i915_reg.h" +#include "intel_cx0_phy_regs.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_device.h" @@ -31,14 +32,25 @@ struct stepping_desc { .step_info.size = ARRAY_SIZE(_map) struct subplatform_desc { - enum intel_display_subplatform subplatform; + struct intel_display_platforms platforms; const char *name; const u16 *pciidlist; struct stepping_desc step_info; }; +#define SUBPLATFORM(_platform, _subplatform) \ + .platforms._platform##_##_subplatform = 1, \ + .name = #_subplatform + +/* + * Group subplatform alias that matches multiple subplatforms. For making ult + * cover both ult and ulx on HSW/BDW. + */ +#define SUBPLATFORM_GROUP(_platform, _subplatform) \ + .platforms._platform##_##_subplatform = 1 + struct platform_desc { - enum intel_display_platform platform; + struct intel_display_platforms platforms; const char *name; const struct subplatform_desc *subplatforms; const struct intel_display_device_info *info; /* NULL for GMD ID */ @@ -46,9 +58,16 @@ struct platform_desc { }; #define PLATFORM(_platform) \ - .platform = (INTEL_DISPLAY_##_platform), \ + .platforms._platform = 1, \ .name = #_platform +/* + * Group platform alias that matches multiple platforms. For aliases such as g4x + * that covers both g45 and gm45. 
+ */ +#define PLATFORM_GROUP(_platform) \ + .platforms._platform = 1 + #define ID(id) (id) static const struct intel_display_device_info no_display = {}; @@ -232,7 +251,7 @@ static const struct intel_display_device_info no_display = {}; .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) static const struct platform_desc i830_desc = { - PLATFORM(I830), + PLATFORM(i830), .info = &(const struct intel_display_device_info) { I830_DISPLAY, @@ -241,7 +260,7 @@ static const struct platform_desc i830_desc = { }; static const struct platform_desc i845_desc = { - PLATFORM(I845G), + PLATFORM(i845g), .info = &(const struct intel_display_device_info) { I845_DISPLAY, @@ -250,7 +269,7 @@ static const struct platform_desc i845_desc = { }; static const struct platform_desc i85x_desc = { - PLATFORM(I85X), + PLATFORM(i85x), .info = &(const struct intel_display_device_info) { I830_DISPLAY, @@ -260,7 +279,7 @@ static const struct platform_desc i85x_desc = { }; static const struct platform_desc i865g_desc = { - PLATFORM(I865G), + PLATFORM(i865g), .info = &(const struct intel_display_device_info) { I845_DISPLAY, @@ -282,7 +301,7 @@ static const struct platform_desc i865g_desc = { .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */ static const struct platform_desc i915g_desc = { - PLATFORM(I915G), + PLATFORM(i915g), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -292,7 +311,7 @@ static const struct platform_desc i915g_desc = { }; static const struct platform_desc i915gm_desc = { - PLATFORM(I915GM), + PLATFORM(i915gm), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -305,7 +324,7 @@ static const struct platform_desc i915gm_desc = { }; static const struct platform_desc i945g_desc = { - PLATFORM(I945G), + PLATFORM(i945g), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -316,7 +335,7 @@ static const struct platform_desc i945g_desc = { }; static const struct platform_desc i945gm_desc = { - PLATFORM(I915GM), + PLATFORM(i915gm), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -330,7 +349,7 @@ static const struct platform_desc i945gm_desc = { }; static const struct platform_desc g33_desc = { - PLATFORM(G33), + PLATFORM(g33), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I845_COLORS, @@ -339,7 +358,7 @@ static const struct platform_desc g33_desc = { }; static const struct platform_desc pnv_desc = { - PLATFORM(PINEVIEW), + PLATFORM(pineview), .info = &(const struct intel_display_device_info) { GEN3_DISPLAY, I9XX_COLORS, @@ -360,7 +379,7 @@ static const struct platform_desc pnv_desc = { BIT(TRANSCODER_A) | BIT(TRANSCODER_B) static const struct platform_desc i965g_desc = { - PLATFORM(I965G), + PLATFORM(i965g), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .has_overlay = 1, @@ -370,7 +389,7 @@ static const struct platform_desc i965g_desc = { }; static const struct platform_desc i965gm_desc = { - PLATFORM(I965GM), + PLATFORM(i965gm), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .has_overlay = 1, @@ -382,7 +401,8 @@ static const struct platform_desc i965gm_desc = { }; static const struct platform_desc g45_desc = { - PLATFORM(G45), + PLATFORM(g45), + PLATFORM_GROUP(g4x), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, @@ -391,7 +411,8 @@ static const struct platform_desc g45_desc = { }; static const struct platform_desc gm45_desc = { - PLATFORM(GM45), + PLATFORM(gm45), + 
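/*
 * What the new macros expand to, using g45 as the example. Designated
 * initializers may legally name the same .platforms member more than once,
 * with each assignment hitting a different bitfield:
 */
static const struct platform_desc g45_expanded = {
	.platforms.g45 = 1, .name = "g45",	/* PLATFORM(g45) */
	.platforms.g4x = 1,			/* PLATFORM_GROUP(g4x) */
	/* .info as in g45_desc above */
};
/* Code can then test display->platform.g4x to match g45 and gm45 alike. */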
PLATFORM_GROUP(g4x), .info = &(const struct intel_display_device_info) { GEN4_DISPLAY, .supports_tv = 1, @@ -414,14 +435,14 @@ static const struct platform_desc gm45_desc = { .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ static const struct platform_desc ilk_d_desc = { - PLATFORM(IRONLAKE), + PLATFORM(ironlake), .info = &(const struct intel_display_device_info) { ILK_DISPLAY, }, }; static const struct platform_desc ilk_m_desc = { - PLATFORM(IRONLAKE), + PLATFORM(ironlake), .info = &(const struct intel_display_device_info) { ILK_DISPLAY, @@ -430,7 +451,7 @@ static const struct platform_desc ilk_m_desc = { }; static const struct platform_desc snb_desc = { - PLATFORM(SANDYBRIDGE), + PLATFORM(sandybridge), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, I9XX_PIPE_OFFSETS, @@ -447,7 +468,7 @@ static const struct platform_desc snb_desc = { }; static const struct platform_desc ivb_desc = { - PLATFORM(IVYBRIDGE), + PLATFORM(ivybridge), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, IVB_PIPE_OFFSETS, @@ -464,7 +485,7 @@ static const struct platform_desc ivb_desc = { }; static const struct platform_desc vlv_desc = { - PLATFORM(VALLEYVIEW), + PLATFORM(valleyview), .info = &(const struct intel_display_device_info) { .has_gmch = 1, .has_hotplug = 1, @@ -495,10 +516,19 @@ static const u16 hsw_ulx_ids[] = { }; static const struct platform_desc hsw_desc = { - PLATFORM(HASWELL), + PLATFORM(haswell), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_HASWELL_ULT, "ULT", hsw_ult_ids }, - { INTEL_DISPLAY_HASWELL_ULX, "ULX", hsw_ulx_ids }, + /* Special case: Use ult both as group and subplatform. */ + { + SUBPLATFORM(haswell, ult), + SUBPLATFORM_GROUP(haswell, ult), + .pciidlist = hsw_ult_ids, + }, + { + SUBPLATFORM(haswell, ulx), + SUBPLATFORM_GROUP(haswell, ult), + .pciidlist = hsw_ulx_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -539,10 +569,19 @@ static const u16 bdw_ulx_ids[] = { }; static const struct platform_desc bdw_desc = { - PLATFORM(BROADWELL), + PLATFORM(broadwell), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_BROADWELL_ULT, "ULT", bdw_ult_ids }, - { INTEL_DISPLAY_BROADWELL_ULX, "ULX", bdw_ulx_ids }, + /* Special case: Use ult both as group and subplatform. 
*/ + { + SUBPLATFORM(broadwell, ult), + SUBPLATFORM_GROUP(broadwell, ult), + .pciidlist = bdw_ult_ids, + }, + { + SUBPLATFORM(broadwell, ulx), + SUBPLATFORM_GROUP(broadwell, ult), + .pciidlist = bdw_ulx_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -567,7 +606,7 @@ static const struct platform_desc bdw_desc = { }; static const struct platform_desc chv_desc = { - PLATFORM(CHERRYVIEW), + PLATFORM(cherryview), .info = &(const struct intel_display_device_info) { .has_hotplug = 1, .has_gmch = 1, @@ -630,10 +669,16 @@ static const enum intel_step skl_steppings[] = { }; static const struct platform_desc skl_desc = { - PLATFORM(SKYLAKE), + PLATFORM(skylake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_SKYLAKE_ULT, "ULT", skl_ult_ids }, - { INTEL_DISPLAY_SKYLAKE_ULX, "ULX", skl_ulx_ids }, + { + SUBPLATFORM(skylake, ult), + .pciidlist = skl_ult_ids, + }, + { + SUBPLATFORM(skylake, ulx), + .pciidlist = skl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -665,10 +710,16 @@ static const enum intel_step kbl_steppings[] = { }; static const struct platform_desc kbl_desc = { - PLATFORM(KABYLAKE), + PLATFORM(kabylake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_KABYLAKE_ULT, "ULT", kbl_ult_ids }, - { INTEL_DISPLAY_KABYLAKE_ULX, "ULX", kbl_ulx_ids }, + { + SUBPLATFORM(kabylake, ult), + .pciidlist = kbl_ult_ids, + }, + { + SUBPLATFORM(kabylake, ulx), + .pciidlist = kbl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -690,10 +741,16 @@ static const u16 cfl_ulx_ids[] = { }; static const struct platform_desc cfl_desc = { - PLATFORM(COFFEELAKE), + PLATFORM(coffeelake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_COFFEELAKE_ULT, "ULT", cfl_ult_ids }, - { INTEL_DISPLAY_COFFEELAKE_ULX, "ULX", cfl_ulx_ids }, + { + SUBPLATFORM(coffeelake, ult), + .pciidlist = cfl_ult_ids, + }, + { + SUBPLATFORM(coffeelake, ulx), + .pciidlist = cfl_ulx_ids, + }, {}, }, .info = &skl_display, @@ -706,9 +763,12 @@ static const u16 cml_ult_ids[] = { }; static const struct platform_desc cml_desc = { - PLATFORM(COMETLAKE), + PLATFORM(cometlake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_COMETLAKE_ULT, "ULT", cml_ult_ids }, + { + SUBPLATFORM(cometlake, ult), + .pciidlist = cml_ult_ids, + }, {}, }, .info = &skl_display, @@ -745,7 +805,7 @@ static const enum intel_step bxt_steppings[] = { }; static const struct platform_desc bxt_desc = { - PLATFORM(BROXTON), + PLATFORM(broxton), .info = &(const struct intel_display_device_info) { GEN9_LP_DISPLAY, .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */ @@ -760,7 +820,7 @@ static const enum intel_step glk_steppings[] = { }; static const struct platform_desc glk_desc = { - PLATFORM(GEMINILAKE), + PLATFORM(geminilake), .info = &(const struct intel_display_device_info) { GEN9_LP_DISPLAY, .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ @@ -822,9 +882,12 @@ static const enum intel_step icl_steppings[] = { }; static const struct platform_desc icl_desc = { - PLATFORM(ICELAKE), + PLATFORM(icelake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ICELAKE_PORT_F, "Port F", icl_port_f_ids }, + { + SUBPLATFORM(icelake, port_f), + .pciidlist = icl_port_f_ids, + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -847,13 +910,13 @@ static const enum intel_step jsl_ehl_steppings[] = { }; static const struct platform_desc jsl_desc = { - PLATFORM(JASPERLAKE), + PLATFORM(jasperlake), .info = &jsl_ehl_display, 
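/*
 * The "special case" entries above in expanded form, taking the Broadwell
 * ULX descriptor: SUBPLATFORM() names the entry, SUBPLATFORM_GROUP() only
 * sets the extra group bit, so an ULX device also matches ULT checks:
 */
static const struct subplatform_desc bdw_ulx_expanded = {
	.platforms.broadwell_ulx = 1,	/* SUBPLATFORM(broadwell, ulx) */
	.name = "ulx",
	.platforms.broadwell_ult = 1,	/* SUBPLATFORM_GROUP(broadwell, ult) */
	.pciidlist = bdw_ulx_ids,
};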
STEP_INFO(jsl_ehl_steppings), }; static const struct platform_desc ehl_desc = { - PLATFORM(ELKHARTLAKE), + PLATFORM(elkhartlake), .info = &jsl_ehl_display, STEP_INFO(jsl_ehl_steppings), }; @@ -919,10 +982,13 @@ static const enum intel_step tgl_uy_steppings[] = { }; static const struct platform_desc tgl_desc = { - PLATFORM(TIGERLAKE), + PLATFORM(tigerlake), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids, - STEP_INFO(tgl_uy_steppings) }, + { + SUBPLATFORM(tigerlake, uy), + .pciidlist = tgl_uy_ids, + STEP_INFO(tgl_uy_steppings), + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -944,7 +1010,7 @@ static const enum intel_step dg1_steppings[] = { }; static const struct platform_desc dg1_desc = { - PLATFORM(DG1), + PLATFORM(dg1), .info = &(const struct intel_display_device_info) { XE_D_DISPLAY, @@ -961,7 +1027,7 @@ static const enum intel_step rkl_steppings[] = { }; static const struct platform_desc rkl_desc = { - PLATFORM(ROCKETLAKE), + PLATFORM(rocketlake), .info = &(const struct intel_display_device_info) { XE_D_DISPLAY, .abox_mask = BIT(0), @@ -996,10 +1062,13 @@ static const enum intel_step adl_s_rpl_s_steppings[] = { }; static const struct platform_desc adl_s_desc = { - PLATFORM(ALDERLAKE_S), + PLATFORM(alderlake_s), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids, - STEP_INFO(adl_s_rpl_s_steppings) }, + { + SUBPLATFORM(alderlake_s, raptorlake_s), + .pciidlist = adls_rpls_ids, + STEP_INFO(adl_s_rpl_s_steppings), + }, {}, }, .info = &(const struct intel_display_device_info) { @@ -1100,14 +1169,23 @@ static const enum intel_step adl_p_rpl_pu_steppings[] = { }; static const struct platform_desc adl_p_desc = { - PLATFORM(ALDERLAKE_P), + PLATFORM(alderlake_p), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids, - STEP_INFO(adl_p_adl_n_steppings) }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids, - STEP_INFO(adl_p_rpl_pu_steppings) }, - { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids, - STEP_INFO(adl_p_rpl_pu_steppings) }, + { + SUBPLATFORM(alderlake_p, alderlake_n), + .pciidlist = adlp_adln_ids, + STEP_INFO(adl_p_adl_n_steppings), + }, + { + SUBPLATFORM(alderlake_p, raptorlake_p), + .pciidlist = adlp_rplp_ids, + STEP_INFO(adl_p_rpl_pu_steppings), + }, + { + SUBPLATFORM(alderlake_p, raptorlake_u), + .pciidlist = adlp_rplu_ids, + STEP_INFO(adl_p_rpl_pu_steppings), + }, {}, }, .info = &xe_lpd_display, @@ -1159,14 +1237,23 @@ static const enum intel_step dg2_g12_steppings[] = { }; static const struct platform_desc dg2_desc = { - PLATFORM(DG2), + PLATFORM(dg2), .subplatforms = (const struct subplatform_desc[]) { - { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids, - STEP_INFO(dg2_g10_steppings) }, - { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids, - STEP_INFO(dg2_g11_steppings) }, - { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids, - STEP_INFO(dg2_g12_steppings) }, + { + SUBPLATFORM(dg2, g10), + .pciidlist = dg2_g10_ids, + STEP_INFO(dg2_g10_steppings), + }, + { + SUBPLATFORM(dg2, g11), + .pciidlist = dg2_g11_ids, + STEP_INFO(dg2_g11_steppings), + }, + { + SUBPLATFORM(dg2, g12), + .pciidlist = dg2_g12_ids, + STEP_INFO(dg2_g12_steppings), + }, {}, }, .info = &xe_hpd_display, @@ -1227,6 +1314,7 @@ static const struct intel_display_device_info xe2_lpd_display = { .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D), + 
.__runtime_defaults.has_dbuf_overlap_detection = true, }; static const struct intel_display_device_info xe2_hpd_display = { @@ -1241,15 +1329,19 @@ static const struct intel_display_device_info xe2_hpd_display = { * reported by the hardware. */ static const struct platform_desc mtl_desc = { - PLATFORM(METEORLAKE), + PLATFORM(meteorlake), }; static const struct platform_desc lnl_desc = { - PLATFORM(LUNARLAKE), + PLATFORM(lunarlake), }; static const struct platform_desc bmg_desc = { - PLATFORM(BATTLEMAGE), + PLATFORM(battlemage), +}; + +static const struct platform_desc ptl_desc = { + PLATFORM(pantherlake), }; __diag_pop(); @@ -1318,9 +1410,11 @@ static const struct { INTEL_RPLU_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc), INTEL_RPLP_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc), INTEL_DG2_IDS(INTEL_DISPLAY_DEVICE, &dg2_desc), + INTEL_ARL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc), INTEL_MTL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc), INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc), INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc), + INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc), }; static const struct { @@ -1331,6 +1425,7 @@ static const struct { { 14, 0, &xe_lpdp_display }, { 14, 1, &xe2_hpd_display }, { 20, 0, &xe2_lpd_display }, + { 30, 0, &xe2_lpd_display }, }; static const struct intel_display_device_info * @@ -1391,7 +1486,7 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc) const struct subplatform_desc *sp; const u16 *id; - for (sp = desc->subplatforms; sp && sp->subplatform; sp++) + for (sp = desc->subplatforms; sp && sp->pciidlist; sp++) for (id = sp->pciidlist; *id; id++) if (*id == pdev->device) return sp; @@ -1450,6 +1545,25 @@ static enum intel_step get_pre_gmdid_step(struct intel_display *display, return step; } +/* Size of the entire bitmap, not the number of platforms */ +static unsigned int display_platforms_num_bits(void) +{ + return sizeof(((struct intel_display_platforms *)0)->bitmap) * BITS_PER_BYTE; +} + +/* Number of platform bits set */ +static unsigned int display_platforms_weight(const struct intel_display_platforms *p) +{ + return bitmap_weight(p->bitmap, display_platforms_num_bits()); +} + +/* Merge the subplatform information from src to dst */ +static void display_platforms_or(struct intel_display_platforms *dst, + const struct intel_display_platforms *src) +{ + bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits()); +} + void intel_display_device_probe(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; @@ -1489,13 +1603,23 @@ void intel_display_device_probe(struct drm_i915_private *i915) &DISPLAY_INFO(i915)->__runtime_defaults, sizeof(*DISPLAY_RUNTIME_INFO(i915))); - drm_WARN_ON(&i915->drm, !desc->platform || !desc->name); - DISPLAY_RUNTIME_INFO(i915)->platform = desc->platform; + drm_WARN_ON(&i915->drm, !desc->name || + !display_platforms_weight(&desc->platforms)); + + display->platform = desc->platforms; subdesc = find_subplatform_desc(pdev, desc); if (subdesc) { - drm_WARN_ON(&i915->drm, !subdesc->subplatform || !subdesc->name); - DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform; + drm_WARN_ON(&i915->drm, !subdesc->name || + !display_platforms_weight(&subdesc->platforms)); + + display_platforms_or(&display->platform, &subdesc->platforms); + + /* Ensure platform and subplatform are distinct */ + drm_WARN_ON(&i915->drm, + display_platforms_weight(&display->platform) != + display_platforms_weight(&desc->platforms) + + display_platforms_weight(&subdesc->platforms)); } if (ip_ver.ver || 
ip_ver.rel || ip_ver.step) { @@ -1531,6 +1655,7 @@ void intel_display_device_remove(struct drm_i915_private *i915) static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915); enum pipe pipe; @@ -1651,8 +1776,10 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) display_runtime->has_hdcp = 0; - if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - display_runtime->fbc_mask = 0; + if (IS_DG2(i915) || DISPLAY_VER(i915) < 13) { + if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) + display_runtime->fbc_mask = 0; + } if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) display_runtime->has_dmc = 0; @@ -1660,6 +1787,10 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 if (IS_DISPLAY_VER(i915, 10, 12) && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) display_runtime->has_dsc = 0; + + if (DISPLAY_VER(display) >= 20 && + (dfsm & XE2LPD_DFSM_DBUF_OVERLAP_DISABLE)) + display_runtime->has_dbuf_overlap_detection = false; } if (DISPLAY_VER(i915) >= 20) { @@ -1677,7 +1808,11 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9 } } - display_runtime->rawclk_freq = intel_read_rawclk(i915); + if (DISPLAY_VER(i915) >= 30) + display_runtime->edp_typec_support = + intel_de_read(display, PICA_PHY_CONFIG_CONTROL) & EDP_ON_TYPEC; + + display_runtime->rawclk_freq = intel_read_rawclk(display); drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq); return; diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index dfb0c8bf5ca2..43144a037f9f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -6,6 +6,7 @@ #ifndef __INTEL_DISPLAY_DEVICE_H__ #define __INTEL_DISPLAY_DEVICE_H__ +#include <linux/bitops.h> #include <linux/types.h> #include "intel_display_conversion.h" @@ -14,89 +15,108 @@ struct drm_i915_private; struct drm_printer; -/* Keep in gen based order, and chronological order within a gen */ -enum intel_display_platform { - INTEL_DISPLAY_PLATFORM_UNINITIALIZED = 0, - /* Display ver 2 */ - INTEL_DISPLAY_I830, - INTEL_DISPLAY_I845G, - INTEL_DISPLAY_I85X, - INTEL_DISPLAY_I865G, - /* Display ver 3 */ - INTEL_DISPLAY_I915G, - INTEL_DISPLAY_I915GM, - INTEL_DISPLAY_I945G, - INTEL_DISPLAY_I945GM, - INTEL_DISPLAY_G33, - INTEL_DISPLAY_PINEVIEW, - /* Display ver 4 */ - INTEL_DISPLAY_I965G, - INTEL_DISPLAY_I965GM, - INTEL_DISPLAY_G45, - INTEL_DISPLAY_GM45, - /* Display ver 5 */ - INTEL_DISPLAY_IRONLAKE, - /* Display ver 6 */ - INTEL_DISPLAY_SANDYBRIDGE, - /* Display ver 7 */ - INTEL_DISPLAY_IVYBRIDGE, - INTEL_DISPLAY_VALLEYVIEW, - INTEL_DISPLAY_HASWELL, - /* Display ver 8 */ - INTEL_DISPLAY_BROADWELL, - INTEL_DISPLAY_CHERRYVIEW, - /* Display ver 9 */ - INTEL_DISPLAY_SKYLAKE, - INTEL_DISPLAY_BROXTON, - INTEL_DISPLAY_KABYLAKE, - INTEL_DISPLAY_GEMINILAKE, - INTEL_DISPLAY_COFFEELAKE, - INTEL_DISPLAY_COMETLAKE, - /* Display ver 11 */ - INTEL_DISPLAY_ICELAKE, - INTEL_DISPLAY_JASPERLAKE, - INTEL_DISPLAY_ELKHARTLAKE, - /* Display ver 12 */ - INTEL_DISPLAY_TIGERLAKE, - INTEL_DISPLAY_ROCKETLAKE, - INTEL_DISPLAY_DG1, - INTEL_DISPLAY_ALDERLAKE_S, - /* Display ver 13 */ - INTEL_DISPLAY_ALDERLAKE_P, - INTEL_DISPLAY_DG2, - /* Display ver 14 (based on GMD ID) */ - INTEL_DISPLAY_METEORLAKE, - /* Display ver 20 
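/*
 * The platform/subplatform disjointness check in the probe code above,
 * worked through with the Haswell ULX bits from this patch (the helper
 * itself is illustrative, not part of the patch):
 */
static void hsw_ulx_merge_example(struct intel_display *display)
{
	const struct intel_display_platforms desc = {
		.haswell = 1,				/* weight 1 */
	};
	const struct intel_display_platforms subdesc = {
		.haswell_ulx = 1, .haswell_ult = 1,	/* weight 2 */
	};

	display->platform = desc;
	display_platforms_or(&display->platform, &subdesc);

	/* merged weight 3 == 1 + 2, so the drm_WARN_ON() stays silent */
	WARN_ON(display_platforms_weight(&display->platform) !=
		display_platforms_weight(&desc) +
		display_platforms_weight(&subdesc));
}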
(based on GMD ID) */ - INTEL_DISPLAY_LUNARLAKE, - /* Display ver 14.1 (based on GMD ID) */ - INTEL_DISPLAY_BATTLEMAGE, -}; +/* + * Display platforms and subplatforms. Keep platforms in display version based + * order, chronological order within a version, and subplatforms next to the + * platform. + */ +#define INTEL_DISPLAY_PLATFORMS(func) \ + /* Display ver 2 */ \ + func(i830) \ + func(i845g) \ + func(i85x) \ + func(i865g) \ + /* Display ver 3 */ \ + func(i915g) \ + func(i915gm) \ + func(i945g) \ + func(i945gm) \ + func(g33) \ + func(pineview) \ + /* Display ver 4 */ \ + func(i965g) \ + func(i965gm) \ + func(g45) \ + func(gm45) \ + func(g4x) /* group alias for g45 and gm45 */ \ + /* Display ver 5 */ \ + func(ironlake) \ + /* Display ver 6 */ \ + func(sandybridge) \ + /* Display ver 7 */ \ + func(ivybridge) \ + func(valleyview) \ + func(haswell) \ + func(haswell_ult) \ + func(haswell_ulx) \ + /* Display ver 8 */ \ + func(broadwell) \ + func(broadwell_ult) \ + func(broadwell_ulx) \ + func(cherryview) \ + /* Display ver 9 */ \ + func(skylake) \ + func(skylake_ult) \ + func(skylake_ulx) \ + func(broxton) \ + func(kabylake) \ + func(kabylake_ult) \ + func(kabylake_ulx) \ + func(geminilake) \ + func(coffeelake) \ + func(coffeelake_ult) \ + func(coffeelake_ulx) \ + func(cometlake) \ + func(cometlake_ult) \ + func(cometlake_ulx) \ + /* Display ver 11 */ \ + func(icelake) \ + func(icelake_port_f) \ + func(jasperlake) \ + func(elkhartlake) \ + /* Display ver 12 */ \ + func(tigerlake) \ + func(tigerlake_uy) \ + func(rocketlake) \ + func(dg1) \ + func(alderlake_s) \ + func(alderlake_s_raptorlake_s) \ + /* Display ver 13 */ \ + func(alderlake_p) \ + func(alderlake_p_alderlake_n) \ + func(alderlake_p_raptorlake_p) \ + func(alderlake_p_raptorlake_u) \ + func(dg2) \ + func(dg2_g10) \ + func(dg2_g11) \ + func(dg2_g12) \ + /* Display ver 14 (based on GMD ID) */ \ + func(meteorlake) \ + /* Display ver 20 (based on GMD ID) */ \ + func(lunarlake) \ + /* Display ver 14.1 (based on GMD ID) */ \ + func(battlemage) \ + /* Display ver 30 (based on GMD ID) */ \ + func(pantherlake) + +#define __MEMBER(name) unsigned long name:1; +#define __COUNT(x) 1 + -enum intel_display_subplatform { - INTEL_DISPLAY_SUBPLATFORM_UNINITIALIZED = 0, - INTEL_DISPLAY_HASWELL_ULT, - INTEL_DISPLAY_HASWELL_ULX, - INTEL_DISPLAY_BROADWELL_ULT, - INTEL_DISPLAY_BROADWELL_ULX, - INTEL_DISPLAY_SKYLAKE_ULT, - INTEL_DISPLAY_SKYLAKE_ULX, - INTEL_DISPLAY_KABYLAKE_ULT, - INTEL_DISPLAY_KABYLAKE_ULX, - INTEL_DISPLAY_COFFEELAKE_ULT, - INTEL_DISPLAY_COFFEELAKE_ULX, - INTEL_DISPLAY_COMETLAKE_ULT, - INTEL_DISPLAY_COMETLAKE_ULX, - INTEL_DISPLAY_ICELAKE_PORT_F, - INTEL_DISPLAY_TIGERLAKE_UY, - INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, - INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, - INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, - INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, - INTEL_DISPLAY_DG2_G10, - INTEL_DISPLAY_DG2_G11, - INTEL_DISPLAY_DG2_G12, +#define __NUM_PLATFORMS (INTEL_DISPLAY_PLATFORMS(__COUNT) 0) + +struct intel_display_platforms { + union { + struct { + INTEL_DISPLAY_PLATFORMS(__MEMBER); + }; + DECLARE_BITMAP(bitmap, __NUM_PLATFORMS); + }; }; +#undef __MEMBER +#undef __COUNT +#undef __NUM_PLATFORMS + #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ /* Keep in alphabetical order */ \ func(cursor_needs_physical); \ @@ -118,10 +138,12 @@ enum intel_display_subplatform { #define HAS_4TILE(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) +#define HAS_BIGJOINER(i915) (DISPLAY_VER(i915) >= 11 && HAS_DSC(i915)) #define 
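/*
 * Mechanics of the X-macro above in miniature: each func(name) row becomes
 * a one-bit field via __MEMBER and a "1 +" term via __COUNT, so the bitmap
 * is sized to exactly the number of rows. A two-platform version that
 * compiles stand-alone in kernel context:
 */
#define EXAMPLE_PLATFORMS(func) \
	func(i830) \
	func(i845g)

#define EX_MEMBER(name) unsigned long name:1;
#define EX_COUNT(x) 1 +

struct example_platforms {
	union {
		struct {
			EXAMPLE_PLATFORMS(EX_MEMBER)	/* i830:1; i845g:1; */
		};
		/* EXAMPLE_PLATFORMS(EX_COUNT) 0 expands to 1 + 1 + 0 == 2 */
		DECLARE_BITMAP(bitmap, EXAMPLE_PLATFORMS(EX_COUNT) 0);
	};
};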
HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl) #define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash) #define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13)) #define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915)) +#define HAS_DBUF_OVERLAP_DETECTION(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dbuf_overlap_detection) #define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi) #define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0) #define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc) @@ -149,9 +171,13 @@ enum intel_display_subplatform { #define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr) #define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12) -#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915)) +#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915) && !IS_GEMINILAKE(i915)) #define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \ BIT(trans)) != 0) +#define HAS_UNCOMPRESSED_JOINER(i915) (DISPLAY_VER(i915) >= 13) +#define HAS_ULTRAJOINER(i915) ((DISPLAY_VER(i915) >= 20 || \ + (IS_DGFX(i915) && DISPLAY_VER(i915) == 14)) && \ + HAS_DSC(i915)) #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) #define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13) #define HAS_CMRR(i915) (DISPLAY_VER(i915) >= 20) @@ -161,10 +187,10 @@ enum intel_display_subplatform { #define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv) /* Check that device has a display IP version within the specific range. */ -#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \ - BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \ - (DISPLAY_VER_FULL(__i915) >= (from) && \ - DISPLAY_VER_FULL(__i915) <= (until))) +#define IS_DISPLAY_VERx100(__i915, from, until) ( \ + BUILD_BUG_ON_ZERO((from) < 200) + \ + (DISPLAY_VERx100(__i915) >= (from) && \ + DISPLAY_VERx100(__i915) <= (until))) /* * Check if a device has a specific IP version as well as a stepping within the @@ -175,22 +201,22 @@ enum intel_display_subplatform { * hardware fix is present and the software workaround is no longer necessary. * E.g., * - * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2) - * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER) + * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_B2) + * IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_C0, STEP_FOREVER) * * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper * stepping bound for the specified IP version. 
*/ -#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \ - (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \ +#define IS_DISPLAY_VERx100_STEP(__i915, ipver, from, until) \ + (IS_DISPLAY_VERx100((__i915), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__i915), (from), (until))) #define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info) #define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info) #define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) -#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \ - DISPLAY_RUNTIME_INFO(i915)->ip.rel) +#define DISPLAY_VERx100(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver * 100 + \ + DISPLAY_RUNTIME_INFO(i915)->ip.rel) #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) @@ -201,9 +227,6 @@ enum intel_display_subplatform { INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) struct intel_display_runtime_info { - enum intel_display_platform platform; - enum intel_display_subplatform subplatform; - struct intel_display_ip_ver { u16 ver; u16 rel; @@ -225,6 +248,8 @@ struct intel_display_runtime_info { bool has_hdcp; bool has_dmc; bool has_dsc; + bool edp_typec_support; + bool has_dbuf_overlap_detection; }; struct intel_display_device_info { diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 069426d9260b..56b78cf6b854 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -11,7 +11,7 @@ #include <acpi/video.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_mode_config.h> #include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> @@ -82,16 +82,17 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev) void intel_display_driver_init_hw(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_cdclk_state *cdclk_state; if (!HAS_DISPLAY(i915)) return; - cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); + cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state); - intel_update_cdclk(i915); - intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); + cdclk_state->logical = cdclk_state->actual = display->cdclk.hw; intel_display_wa_apply(i915); } @@ -168,10 +169,11 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915) static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); @@ -192,8 +194,8 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915) intel_display_irq_init(i915); intel_dkl_phy_init(i915); - intel_color_init_hooks(i915); - intel_init_cdclk_hooks(i915); + intel_color_init_hooks(&i915->display); + intel_init_cdclk_hooks(&i915->display); intel_audio_hooks_init(i915); intel_dpll_init_clock_hook(i915); 
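/*
 * The renamed macros in concrete numbers: IP_VER(14, 1) packed 14.1 as
 * (14 << 8) | 1 == 0x0e01, whereas DISPLAY_VERx100() yields
 * 14 * 100 + 1 == 1401, so version ranges now read as plain decimals:
 */
static bool example_is_display_14(struct drm_i915_private *i915)
{
	/* true on display 14.0 and 14.1, false on 13.x and 20.x */
	return IS_DISPLAY_VERx100(i915, 1400, 1401);
}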
intel_init_display_hooks(i915); @@ -219,7 +221,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) intel_bios_init(display); - ret = intel_vga_register(i915); + ret = intel_vga_register(display); if (ret) goto cleanup_bios; @@ -235,7 +237,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return 0; - intel_dmc_init(i915); + intel_dmc_init(display); i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | @@ -243,11 +245,11 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) intel_mode_config_init(i915); - ret = intel_cdclk_init(i915); + ret = intel_cdclk_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; - ret = intel_color_init(i915); + ret = intel_color_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; @@ -270,10 +272,10 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) return 0; cleanup_vga_client_pw_domain_dmc: - intel_dmc_fini(i915); + intel_dmc_fini(display); intel_power_domains_driver_remove(i915); cleanup_vga: - intel_vga_unregister(i915); + intel_vga_unregister(display); cleanup_bios: intel_bios_driver_remove(display); @@ -430,7 +432,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_pps_setup(display); - intel_gmbus_setup(i915); + intel_gmbus_setup(display); drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", INTEL_NUM_PIPES(i915), @@ -450,13 +452,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_display_driver_init_hw(i915); intel_dpll_update_ref_clks(i915); - if (i915->display.cdclk.max_cdclk_freq == 0) - intel_update_max_cdclk(i915); + if (display->cdclk.max_cdclk_freq == 0) + intel_update_max_cdclk(display); intel_hti_init(display); /* Just disable it once at startup */ - intel_vga_disable(i915); + intel_vga_disable(display); intel_setup_outputs(i915); ret = intel_dp_tunnel_mgr_init(display); @@ -483,7 +485,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) return 0; err_hdcp: - intel_hdcp_component_fini(i915); + intel_hdcp_component_fini(display); err_mode_config: intel_mode_config_cleanup(i915); @@ -493,6 +495,7 @@ err_mode_config: /* part #3: call after gem init */ int intel_display_driver_probe(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; int ret; if (!HAS_DISPLAY(i915)) @@ -503,7 +506,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915) * the BIOS fb takeover and whatever else magic ggtt reservations * happen during gem/ggtt init. */ - intel_hdcp_component_init(i915); + intel_hdcp_component_init(display); /* * Force all active planes to recompute their states. 
So that on @@ -598,7 +601,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) /* flush any delayed tasks or pending work */ flush_workqueue(i915->unordered_wq); - intel_hdcp_component_fini(i915); + intel_hdcp_component_fini(display); intel_mode_config_cleanup(i915); @@ -606,7 +609,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) intel_overlay_cleanup(i915); - intel_gmbus_teardown(i915); + intel_gmbus_teardown(display); destroy_workqueue(i915->display.wq.flip); destroy_workqueue(i915->display.wq.modeset); @@ -619,11 +622,11 @@ void intel_display_driver_remove_nogem(struct drm_i915_private *i915) { struct intel_display *display = &i915->display; - intel_dmc_fini(i915); + intel_dmc_fini(display); intel_power_domains_driver_remove(i915); - intel_vga_unregister(i915); + intel_vga_unregister(display); intel_bios_driver_remove(display); } @@ -681,12 +684,13 @@ __intel_display_driver_resume(struct drm_i915_private *i915, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = &i915->display; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; int ret, i; intel_modeset_setup_hw_state(i915, ctx); - intel_vga_redisable(i915); + intel_vga_redisable(display); if (!state) return 0; diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 73369847ed66..e1547ebce60e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_vblank.h> + #include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_irq.h" @@ -27,7 +29,8 @@ static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); drm_crtc_handle_vblank(&crtc->base); } @@ -269,14 +272,17 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, intel_uncore_posting_read(&dev_priv->uncore, reg); } -static bool i915_has_asle(struct drm_i915_private *i915) +static bool i915_has_legacy_blc_interrupt(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); - if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915)) - return false; + if (IS_I85X(i915)) + return true; + + if (IS_PINEVIEW(i915)) + return true; - return intel_opregion_asle_present(display); + return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915); } /** @@ -285,7 +291,12 @@ static bool i915_has_asle(struct drm_i915_private *i915) */ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - if (!i915_has_asle(dev_priv)) + struct intel_display *display = &dev_priv->display; + + if (!intel_opregion_asle_present(display)) + return; + + if (!i915_has_legacy_blc_interrupt(display)) return; spin_lock_irq(&dev_priv->irq_lock); @@ -298,14 +309,15 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -#if defined(CONFIG_DEBUG_FS) +#if IS_ENABLED(CONFIG_DEBUG_FS) static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = 
intel_crtc_for_pipe(display, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -344,7 +356,8 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct intel_display *display = &i915->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); spin_lock(&i915->drm.event_lock); @@ -400,7 +413,7 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, res1, res2); } -void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -480,28 +493,10 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->irq_lock); } -void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, - u16 iir, u32 pipe_stats[I915_MAX_PIPES]) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } -} - void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { struct intel_display *display = &dev_priv->display; - bool blc_event = false; enum pipe pipe; @@ -548,12 +543,13 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_opregion_asle_intr(display); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { + struct intel_display *display = &dev_priv->display; enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -571,7 +567,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) @@ -593,7 +589,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_HDCP_MASK) drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); @@ -682,7 +678,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_dp_aux_irq_handler(display); if (pch_iir & SDE_GMBUS_CPT) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); @@ -907,6 +903,13 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) struct intel_display *display = &dev_priv->display; bool found = false; + if (HAS_DBUF_OVERLAP_DETECTION(display)) { + if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) { + drm_warn(display->drm, "DBuf overlap detected\n"); + found = true; + } + } + if (DISPLAY_VER(dev_priv) >= 14) { if (iir & (XELPDP_PMDEMAND_RSP | XELPDP_PMDEMAND_RSPTOUT_ERR)) { @@ -1026,17 +1029,6 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) return GEN8_PIPE_PRIMARY_FLIP_DONE; } -u32 
gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) -{ - u32 mask = GEN8_PIPE_FIFO_UNDERRUN; - - if (DISPLAY_VER(dev_priv) >= 13) - mask |= XELPD_PIPE_SOFT_UNDERRUN | - XELPD_PIPE_HARD_UNDERRUN; - - return mask; -} - static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) { u32 pica_ier = 0; @@ -1125,7 +1117,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && (iir & BXT_DE_PORT_GMBUS)) { - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); found = true; } @@ -1182,7 +1174,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); - if (iir & gen8_de_pipe_underrun_mask(dev_priv)) + if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); @@ -1226,15 +1218,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { - void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; - iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); + iir = intel_de_read(i915, GEN11_GU_MISC_IIR); if (likely(iir)) - raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); + intel_de_write(i915, GEN11_GU_MISC_IIR, iir); return iir; } @@ -1249,25 +1240,56 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) void gen11_display_irq_handler(struct drm_i915_private *i915) { - void __iomem * const regs = intel_uncore_regs(&i915->uncore); - const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); + u32 disp_ctl; disable_rpm_wakeref_asserts(&i915->runtime_pm); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. */ - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); + disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL); + + intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0); gen8_de_irq_handler(i915, disp_ctl); - raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, - GEN11_DISPLAY_IRQ_ENABLE); + intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); enable_rpm_wakeref_asserts(&i915->runtime_pm); } -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ +static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->drm.vblank_time_lock); + + /* + * Vblank/CRC interrupts fail to wake the device up from C2+. + * Disabling render clock gating during C-states avoids + * the problem. There is a small power cost so we do this + * only when vblank/CRC interrupts are actually enabled. 
+ */ + if (i915->display.irq.vblank_enabled++ == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); +} + +static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->drm.vblank_time_lock); + + if (--i915->display.irq.vblank_enabled == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); +} + +void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable) +{ + spin_lock_irq(&i915->drm.vblank_time_lock); + + if (enable) + i915gm_irq_cstate_wa_enable(i915); + else + i915gm_irq_cstate_wa_disable(i915); + + spin_unlock_irq(&i915->drm.vblank_time_lock); +} + int i8xx_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1281,22 +1303,35 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) return 0; } +void i8xx_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + int i915gm_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->dev); - /* - * Vblank interrupts fail to wake the device up from C2+. - * Disabling render clock gating during C-states avoids - * the problem. There is a small power cost so we do this - * only when vblank interrupts are actually enabled. - */ - if (i915->display.irq.vblank_enabled++ == 0) - intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + i915gm_irq_cstate_wa_enable(i915); return i8xx_enable_vblank(crtc); } +void i915gm_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->dev); + + i8xx_disable_vblank(crtc); + + i915gm_irq_cstate_wa_disable(i915); +} + int i965_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1311,6 +1346,18 @@ int i965_enable_vblank(struct drm_crtc *crtc) return 0; } +void i965_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + int ilk_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); @@ -1332,6 +1379,19 @@ int ilk_enable_vblank(struct drm_crtc *crtc) return 0; } +void ilk_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + unsigned long irqflags; + u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
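/*
 * The point of splitting the workaround out of the vblank hooks: CRC
 * interrupts hit the same C-state wakeup issue, so CRC code can now take
 * the same reference. The callers below are hypothetical, this hunk only
 * adds the i915gm_irq_cstate_wa() entry point:
 */
static void example_crc_wa_get(struct drm_i915_private *i915)
{
	if (IS_I915GM(i915))
		i915gm_irq_cstate_wa(i915, true);
}

static void example_crc_wa_put(struct drm_i915_private *i915)
{
	if (IS_I915GM(i915))
		i915gm_irq_cstate_wa(i915, false);
}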
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ilk_disable_display_irq(dev_priv, bit); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, bool enable) { @@ -1356,9 +1416,27 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, return true; } +static void intel_display_vblank_dc_work(struct work_struct *work) +{ + struct intel_display *display = + container_of(work, typeof(*display), irq.vblank_dc_work); + struct drm_i915_private *i915 = to_i915(display->drm); + int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes); + + /* + * NOTE: intel_display_power_set_target_dc_state is used only by PSR + * code for DC3CO handling. DC3CO target state is currently disabled in + * PSR code. If DC3CO is taken into use we need take that into account + * here as well. + */ + intel_display_power_set_target_dc_state(i915, vblank_wa_num_pipes ? DC_STATE_DISABLE : + DC_STATE_EN_UPTO_DC6); +} + int bdw_enable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; @@ -1366,6 +1444,9 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) if (gen11_dsi_configure_te(crtc, true)) return 0; + if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0) + schedule_work(&display->irq.vblank_dc_work); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1379,58 +1460,10 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) return 0; } -/* Called from drm generic code, passed 'crtc' which - * we use as a pipe index - */ -void i8xx_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void i915gm_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *i915 = to_i915(crtc->dev); - - i8xx_disable_vblank(crtc); - - if (--i915->display.irq.vblank_enabled == 0) - intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); -} - -void i965_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -void ilk_disable_vblank(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; - unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
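/*
 * The DC-state workaround in bdw_enable_vblank()/bdw_disable_vblank() acts
 * only on the 0 <-> 1 edges of the pipe count: the first affected pipe to
 * enable vblank IRQs queues the worker to disable DC states, and the last
 * one to drop them queues it to allow DC6 again. Condensed view (helper
 * names are illustrative):
 */
static void vblank_dc_block_get(struct intel_display *display)
{
	if (display->irq.vblank_wa_num_pipes++ == 0)
		schedule_work(&display->irq.vblank_dc_work); /* -> DC_STATE_DISABLE */
}

static void vblank_dc_block_put(struct intel_display *display)
{
	if (--display->irq.vblank_wa_num_pipes == 0)
		schedule_work(&display->irq.vblank_dc_work); /* -> DC_STATE_EN_UPTO_DC6 */
}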
- DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_disable_display_irq(dev_priv, bit); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - void bdw_disable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; @@ -1441,6 +1474,9 @@ void bdw_disable_vblank(struct drm_crtc *_crtc) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0) + schedule_work(&display->irq.vblank_dc_work); } void vlv_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1457,10 +1493,21 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv) i9xx_pipestat_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, VLV_); + gen2_irq_reset(uncore, VLV_IRQ_REGS); dev_priv->irq_mask = ~0u; } +void i9xx_display_irq_reset(struct drm_i915_private *i915) +{ + if (I915_HAS_HOTPLUG(i915)) { + i915_hotplug_interrupt_update(i915, 0xffffffff, 0); + intel_uncore_rmw(&i915->uncore, + PORT_HOTPLUG_STAT(i915), 0, 0); + } + + i9xx_pipestat_irq_reset(i915); +} + void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -1489,7 +1536,7 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~enable_mask; - GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); } void gen8_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1506,10 +1553,10 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); + gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS); } void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -1549,26 +1596,25 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); - GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); - GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); + gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS); if (DISPLAY_VER(dev_priv) >= 14) - GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); + gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS); else - GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); + gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - GEN3_IRQ_RESET(uncore, SDE); + gen2_irq_reset(uncore, SDE_IRQ_REGS); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { struct intel_uncore *uncore = &dev_priv->uncore; - u32 extra_ier = GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | + u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | gen8_de_pipe_flip_done_mask(dev_priv); enum pipe pipe; @@ -1580,9 +1626,9 @@ void 
gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, } for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->display.irq.de_irq_mask[pipe], - ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); + gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe), + dev_priv->display.irq.de_irq_mask[pipe], + ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } @@ -1601,7 +1647,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, } for_each_pipe_masked(dev_priv, pipe, pipe_mask) - GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); + gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe)); spin_unlock_irq(&dev_priv->irq_lock); @@ -1635,7 +1681,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) else mask = SDE_GMBUS_CPT; - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff); } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) @@ -1692,7 +1738,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) } if (IS_HASWELL(i915)) { - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR); display_mask |= DE_EDP_PSR_INT_HSW; } @@ -1703,7 +1749,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) ibx_irq_postinstall(i915); - GEN3_IRQ_INIT(uncore, DE, i915->irq_mask, + gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask, display_mask | extra_mask); } @@ -1751,14 +1797,16 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_port_masked |= DSI0_TE | DSI1_TE; } + if (HAS_DBUF_OVERLAP_DETECTION(display)) + de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED; + if (HAS_DSB(dev_priv)) de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | GEN12_DSB_INT(INTEL_DSB_1) | GEN12_DSB_INT(INTEL_DSB_2); de_pipe_enables = de_pipe_masked | - GEN8_PIPE_VBLANK | - gen8_de_pipe_underrun_mask(dev_priv) | + GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | gen8_de_pipe_flip_done_mask(dev_priv); de_port_enables = de_port_masked; @@ -1777,11 +1825,11 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (!intel_display_power_is_enabled(dev_priv, domain)) continue; - gen3_assert_iir_is_zero(uncore, + gen2_assert_iir_is_zero(uncore, TRANS_PSR_IIR(dev_priv, trans)); } } else { - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR); } for_each_pipe(dev_priv, pipe) { @@ -1789,20 +1837,20 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) - GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->display.irq.de_irq_mask[pipe], - de_pipe_enables); + gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe), + dev_priv->display.irq.de_irq_mask[pipe], + de_pipe_enables); } - GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); - GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); + gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables); + gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked); if (IS_DISPLAY_VER(dev_priv, 11, 13)) { u32 de_hpd_masked = 0; u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; - GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, + gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked, de_hpd_enables); } } @@ -1815,10 +1863,10 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915) u32 de_hpd_enables = 
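/*
 * Shape of the gen2_irq_reset()/gen2_irq_init() conversion, as far as this
 * diff shows it: each *_IRQ_REGS token appears to name a small struct
 * bundling one interrupt unit's IMR/IER/IIR registers, replacing the
 * token-pasting GEN3_IRQ_RESET()/GEN3_IRQ_INIT() macros with ordinary
 * functions. The struct and body below are an assumption sketched from the
 * call sites, not the actual definitions:
 */
struct example_irq_regs {
	i915_reg_t imr, ier, iir;
};

static void example_irq_reset(struct intel_uncore *uncore,
			      struct example_irq_regs regs)
{
	/* mask and disable everything, then clear any latched events */
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_write(uncore, regs.ier, 0);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}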
de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK; - GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask, + gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask, de_hpd_enables); - GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff); } static void icp_irq_postinstall(struct drm_i915_private *dev_priv) @@ -1826,7 +1874,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; u32 mask = SDE_GMBUS_ICP; - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); + gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff); } void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) @@ -1866,4 +1914,7 @@ void intel_display_irq_init(struct drm_i915_private *i915) i915->display.irq.display_irqs_enabled = false; intel_hotplug_irq_init(i915); + + INIT_WORK(&i915->display.irq.vblank_dc_work, + intel_display_vblank_dc_work); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h index 2a090dd6abd7..b077712b7be1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -33,7 +33,6 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask); -u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *i915); int i8xx_enable_vblank(struct drm_crtc *crtc); int i915gm_enable_vblank(struct drm_crtc *crtc); @@ -54,6 +53,7 @@ void gen11_display_irq_handler(struct drm_i915_private *i915); u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl); void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir); +void i9xx_display_irq_reset(struct drm_i915_private *i915); void vlv_display_irq_reset(struct drm_i915_private *i915); void gen8_display_irq_reset(struct drm_i915_private *i915); void gen11_display_irq_reset(struct drm_i915_private *i915); @@ -68,15 +68,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe); void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); void i915_enable_asle_pipestat(struct drm_i915_private *i915); -void i9xx_pipestat_irq_reset(struct drm_i915_private *i915); void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]); -void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]); void intel_display_irq_init(struct drm_i915_private *i915); +void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable); + #endif /* __INTEL_DISPLAY_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h index c4775c99dc83..f0fa27e365ab 100644 --- a/drivers/gpu/drm/i915/display/intel_display_limits.h +++ b/drivers/gpu/drm/i915/display/intel_display_limits.h @@ -50,6 +50,16 @@ enum transcoder { }; /* + * Global 
legacy plane identifier. Valid only for primary/sprite + * planes on pre-g4x, and only for primary planes on g4x-bdw. + */ +enum i9xx_plane_id { + PLANE_A, + PLANE_B, + PLANE_C, +}; + +/* * Per-pipe plane identifier. * I915_MAX_PLANES in the enum below is the maximum (across all platforms) * number of planes per CRTC. Not all platforms really have this many planes, diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index 1a45d300b6f0..024de8abcb1a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -173,14 +173,16 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name, /** * intel_display_params_dump - dump intel display modparams - * @display: display device + * @params: display params + * @driver_name: driver name to use for printing * @p: the &drm_printer * * Pretty printer for i915 modparams. */ -void intel_display_params_dump(struct intel_display *display, struct drm_printer *p) +void intel_display_params_dump(const struct intel_display_params *params, + const char *driver_name, struct drm_printer *p) { -#define PRINT(T, x, ...) _param_print(p, display->drm->driver->name, #x, display->params.x); +#define PRINT(T, x, ...) _param_print(p, driver_name, #x, params->x); INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT); #undef PRINT } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index da8dc943234b..dcb6face936a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -9,7 +9,6 @@ #include <linux/types.h> struct drm_printer; -struct intel_display; /* * Invoke param, a function-like macro, for each intel display param, with @@ -56,8 +55,8 @@ struct intel_display_params { }; #undef MEMBER -void intel_display_params_dump(struct intel_display *display, - struct drm_printer *p); +void intel_display_params_dump(const struct intel_display_params *params, + const char *driver_name, struct drm_printer *p); void intel_display_params_copy(struct intel_display_params *dest); void intel_display_params_free(struct intel_display_params *params); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ef2fdbf97346..2766fd9208b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -545,7 +545,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) - return false; + return NULL; mutex_lock(&power_domains->lock); @@ -560,7 +560,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, if (!is_enabled) { intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - wakeref = 0; + wakeref = NULL; } return wakeref; @@ -648,7 +648,7 @@ intel_display_power_put_async_work(struct work_struct *work) struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); - intel_wakeref_t old_work_wakeref = 0; + intel_wakeref_t old_work_wakeref = NULL; mutex_lock(&power_domains->lock); @@ -895,7 +895,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); 
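[Aside: intel_display_params_dump() above is decoupled from struct intel_display so a captured copy of the params can be printed later (see the snapshot code further down). A short usage sketch built on the standard drm_printer helpers; the wrapper function is hypothetical:]

	#include <drm/drm_print.h>

	static void dump_display_params(struct intel_display *display)
	{
		struct drm_printer p = drm_info_printer(display->drm->dev);

		/* the params and driver name are passed explicitly; no live
		 * display state is needed beyond what the caller provides */
		intel_display_params_dump(&display->params,
					  display->drm->driver->name, &p);
	}
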
for_each_power_domain(domain, mask) { - intel_wakeref_t __maybe_unused wf = -1; + intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); @@ -1176,43 +1176,44 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) - I915_STATE_WARN(dev_priv, crtc->active, - "CRTC for pipe %c enabled\n", - pipe_name(crtc->pipe)); - - I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), - "Display power well on\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, - "SPLL enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, - "WRPLL1 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, - "WRPLL2 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, PP_STATUS(dev_priv, 0)) & PP_ON, - "Panel power on\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, - "CPU PWM1 enabled\n"); + for_each_intel_crtc(display->drm, crtc) + INTEL_DISPLAY_STATE_WARN(display, crtc->active, + "CRTC for pipe %c enabled\n", + pipe_name(crtc->pipe)); + + INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2), + "Display power well on\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE, + "SPLL enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, + "WRPLL1 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, + "WRPLL2 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, PP_STATUS(display, 0)) & PP_ON, + "Panel power on\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + "CPU PWM1 enabled\n"); if (IS_HASWELL(dev_priv)) - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, - "CPU PWM2 enabled\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, - "PCH PWM1 enabled\n"); - I915_STATE_WARN(dev_priv, - (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), - "Utility pin enabled in PWM mode\n"); - I915_STATE_WARN(dev_priv, - intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, - "PCH GTC enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + "PCH PWM1 enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, + (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), + "Utility pin enabled in PWM mode\n"); + INTEL_DISPLAY_STATE_WARN(display, + intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE, + "PCH GTC enabled\n"); /* * In theory we can still leave IRQs enabled, as long as only the HPD @@ -1220,8 +1221,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. 
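[Aside on the 0 -> NULL and -1 -> INTEL_WAKEREF_DEF substitutions in this file: they follow from intel_wakeref_t becoming a pointer-style handle rather than a plain integer cookie. A stand-alone model of the convention; the typedef and macro bodies below are assumptions, only the two sentinel roles are taken from the hunks:]

	#include <stdbool.h>
	#include <stddef.h>

	/* sketch only: the real typedef lives elsewhere in the driver */
	typedef struct intel_wakeref *intel_wakeref_t;

	/* "tracking disabled / don't care" sentinel, formerly -1 */
	#define INTEL_WAKEREF_DEF ((intel_wakeref_t)-1)

	static bool holds_wakeref(intel_wakeref_t wf)
	{
		return wf != NULL;	/* formerly wf != 0 */
	}
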
*/ - I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv), - "IRQs enabled\n"); + INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv), + "IRQs enabled\n"); } static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) @@ -1300,6 +1301,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, */ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; u32 val; val = intel_de_read(dev_priv, LCPLL_CTL); @@ -1343,8 +1345,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); } /* @@ -1416,10 +1418,11 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* enable PCH reset handshake */ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); @@ -1438,28 +1441,29 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); gen9_dbuf_enable(dev_priv); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); } static void skl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); + gen9_disable_dc_states(display); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); /* The spec doesn't call for removing the reset handshake flag */ /* disable PG1 and Misc I/O */ @@ -1482,10 +1486,11 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT @@ -1506,28 +1511,29 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume mutex_unlock(&power_domains->lock); - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); gen9_dbuf_enable(dev_priv); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); } static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains 
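[Aside: the recurring shape in the skl/bxt/icl display_core_{init,uninit}() hunks is to derive a local struct intel_display early, hand it to converted interfaces, and keep dev_priv only for those that still take i915; functions that already receive display recover i915 via to_i915(display->drm), as assert_can_enable_dc9() below does. Reduced to its skeleton, with calls taken from the hunks:]

	static void core_init_skeleton(struct drm_i915_private *dev_priv)
	{
		struct intel_display *display = &dev_priv->display;

		/* converted interfaces take the display pointer ... */
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		intel_cdclk_init_hw(display);

		/* ... unconverted ones still take i915 */
		intel_pch_reset_handshake(dev_priv, true);
	}
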
*power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); + gen9_disable_dc_states(display); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); /* The spec doesn't call for removing the reset handshake flag */ @@ -1623,10 +1629,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && @@ -1657,7 +1664,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); /* 4. Enable CDCLK. */ - intel_cdclk_init_hw(dev_priv); + intel_cdclk_init_hw(display); if (DISPLAY_VER(dev_priv) >= 12) gen12_dbuf_slices_config(dev_priv); @@ -1677,14 +1684,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_snps_phy_wait_for_calibration(dev_priv); /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */ - if (DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 1)) + if (DISPLAY_VERx100(dev_priv) == 1401) intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1); if (resume) - intel_dmc_load_program(dev_priv); + intel_dmc_load_program(display); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ - if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0))) + if (IS_DISPLAY_VERx100(dev_priv, 1200, 1300)) intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); @@ -1704,14 +1711,15 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; - gen9_disable_dc_states(dev_priv); - intel_dmc_disable_program(dev_priv); + gen9_disable_dc_states(display); + intel_dmc_disable_program(display); /* 1. Disable all display engine functions -> aready done */ @@ -1719,7 +1727,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_dbuf_disable(dev_priv); /* 3. 
Disable CD clock */ - intel_cdclk_uninit_hw(dev_priv); + intel_cdclk_uninit_hw(display); if (DISPLAY_VER(dev_priv) == 14) intel_de_rmw(dev_priv, DC_STATE_EN, 0, @@ -2066,7 +2074,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915) */ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_display *display = &i915->display; + struct i915_power_domains *power_domains = &display->power.domains; intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&power_domains->init_wakeref); @@ -2080,7 +2089,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) * that would be blocked if the firmware was inactive. */ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && - intel_dmc_has_payload(i915)) { + intel_dmc_has_payload(display)) { intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); return; @@ -2225,9 +2234,11 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915) void intel_display_power_suspend_late(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } @@ -2239,10 +2250,12 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915) void intel_display_power_resume_early(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - gen9_sanitize_dc_state(i915); - bxt_disable_dc9(i915); + gen9_sanitize_dc_state(display); + bxt_disable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } @@ -2254,12 +2267,14 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) void intel_display_power_suspend(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + if (DISPLAY_VER(i915) >= 11) { icl_display_core_uninit(i915); - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_display_core_uninit(i915); - bxt_enable_dc9(i915); + bxt_enable_dc9(display); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } @@ -2267,23 +2282,24 @@ void intel_display_power_suspend(struct drm_i915_private *i915) void intel_display_power_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_display *display = &i915->display; + struct i915_power_domains *power_domains = &display->power.domains; if (DISPLAY_VER(i915) >= 11) { - bxt_disable_dc9(i915); + bxt_disable_dc9(display); icl_display_core_init(i915, true); - if (intel_dmc_has_payload(i915)) { + if (intel_dmc_has_payload(display)) { if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) - skl_enable_dc6(i915); + skl_enable_dc6(display); else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) - gen9_enable_dc5(i915); + gen9_enable_dc5(display); } } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { - bxt_disable_dc9(i915); + bxt_disable_dc9(display); bxt_display_core_init(i915, true); - if (intel_dmc_has_payload(i915) && + if (intel_dmc_has_payload(display) && (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) - gen9_enable_dc5(i915); + gen9_enable_dc5(display); } else if 
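[Aside on the DISPLAY_VER_FULL/IP_VER to DISPLAY_VERx100 conversions above: the new form is a flat major * 100 + minor encoding, so IP_VER(14, 1) reads as 1401 and the 12.0..13.0 range becomes 1200..1300. The encoding in isolation; VERX100 is a stand-in spelling for illustration:]

	#define VERX100(major, minor)	((major) * 100 + (minor))

	/* VERX100(14, 1) == 1401 */
	static int verx100_in_range(int ver, int from, int to)
	{
		/* e.g. verx100_in_range(1300, 1200, 1300) is true */
		return ver >= from && ver <= to;
	}
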
(IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 425452c5a469..3f8f84df4733 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -232,7 +232,7 @@ intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - __intel_display_power_put_async(i915, domain, -1, -1); + __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, -1); } static inline void @@ -241,7 +241,7 @@ intel_display_power_put_async_delay(struct drm_i915_private *i915, intel_wakeref_t wakeref, int delay_ms) { - __intel_display_power_put_async(i915, domain, -1, delay_ms); + __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, delay_ms); } #endif @@ -297,10 +297,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, #define with_intel_display_power(i915, domain, wf) \ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ - intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL) #define with_intel_display_power_if_enabled(i915, domain, wf) \ for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \ - intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL) #endif /* __INTEL_DISPLAY_POWER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 10948b3964ee..5575aa0d6689 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1586,6 +1586,136 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = { I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), }; +/* + * Xe3 changes the power well hierarchy slightly from Xe_LPD+; PGB now + * depends on PG1 instead of PG2: + * + * PG0 + * | + * --PG1-- + * / | \ + * PGA PGB PG2 + * / \ + * PGC PGD + */ + +#define XE3LPD_PW_C_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C + +#define XE3LPD_PW_D_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D + +#define XE3LPD_PW_2_POWER_DOMAINS \ + XE3LPD_PW_C_POWER_DOMAINS, \ + XE3LPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_TRANSCODER_D, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4 + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_2, + XE3LPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_b, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_c, + XE3LPD_PW_C_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_d, + XE3LPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xe3lpd_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xe3lpd_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = 
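[Aside: with_intel_display_power() in the header hunk above is a for-loop wrapper; the body runs with the reference held, and the wakeref is put (asynchronously) and reset to NULL on exit. A hedged usage sketch, reusing a domain from the tables above:]

	static void audio_mmio_access(struct drm_i915_private *i915)
	{
		intel_wakeref_t wf;

		with_intel_display_power(i915, POWER_DOMAIN_AUDIO_MMIO, wf) {
			/* the domain is powered here and wf is non-NULL */
		}
		/* on exit the reference has been put and wf is NULL again */
	}
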
&hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xe3lpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xe3lpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xe3lpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), + I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), + I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), + I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), + I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), + ), + .ops = &xelpdp_aux_power_well_ops, + }, +}; + +I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off, + POWER_DOMAIN_DC_OFF, + XE3LPD_PW_2_POWER_DOMAINS, + XE3LPD_PW_C_POWER_DOMAINS, + XE3LPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xe3lpd_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list xe3lpd_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff), + I915_PW_DESCRIPTORS(xe3lpd_power_wells_main), + I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1693,7 +1823,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) return 0; } - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(i915) >= 30) + return set_power_wells(power_domains, xe3lpd_power_wells); + else if (DISPLAY_VER(i915) >= 20) return set_power_wells(power_domains, xe2lpd_power_wells); else if (DISPLAY_VER(i915) >= 14) return set_power_wells(power_domains, xelpdp_power_wells); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 46e9eff12c23..f0131dd853de 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -187,8 +187,10 @@ int intel_power_well_refcount(struct i915_power_well *power_well) static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, u8 irq_pipe_mask, bool has_vga) { + struct intel_display *display = &dev_priv->display; + if (has_vga) - intel_vga_reset_io_mem(dev_priv); + intel_vga_reset_io_mem(display); if (irq_pipe_mask) gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); @@ -601,20 +603,22 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, return (val & mask) == mask; } -static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc9(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, 
DC_STATE_EN) & DC_STATE_EN_DC9), + struct drm_i915_private *dev_priv = to_i915(display->drm); + + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n"); - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); /* @@ -626,12 +630,14 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) */ } -static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) +static void assert_can_disable_dc9(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + struct drm_i915_private *dev_priv = to_i915(display->drm); + + drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled.\n"); @@ -644,14 +650,14 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) */ } -static void gen9_write_dc_state(struct drm_i915_private *dev_priv, +static void gen9_write_dc_state(struct intel_display *display, u32 state) { int rewrites = 0; int rereads = 0; u32 v; - intel_de_write(dev_priv, DC_STATE_EN, state); + intel_de_write(display, DC_STATE_EN, state); /* It has been observed that disabling the dc6 state sometimes * doesn't stick and dmc keeps returning old value. Make sure @@ -659,10 +665,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, * we are confident that state is exactly what we want. 
*/ do { - v = intel_de_read(dev_priv, DC_STATE_EN); + v = intel_de_read(display, DC_STATE_EN); if (v != state) { - intel_de_write(dev_priv, DC_STATE_EN, state); + intel_de_write(display, DC_STATE_EN, state); rewrites++; rereads = 0; } else if (rereads++ > 5) { @@ -672,27 +678,28 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, } while (rewrites < 100); if (v != state) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Writing dc state to 0x%x failed, now 0x%x\n", state, v); /* Most of the times we need one retry, avoid spam */ if (rewrites > 1) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Rewrote dc state to 0x%x %d times\n", state, rewrites); } -static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) +static u32 gen9_dc_mask(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 mask; mask = DC_STATE_EN_UPTO_DC5; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; - else if (DISPLAY_VER(dev_priv) == 11) + else if (DISPLAY_VER(display) == 11) mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) mask |= DC_STATE_EN_DC9; @@ -702,17 +709,17 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) return mask; } -void gen9_sanitize_dc_state(struct drm_i915_private *i915) +void gen9_sanitize_dc_state(struct intel_display *display) { - struct i915_power_domains *power_domains = &i915->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; u32 val; - if (!HAS_DISPLAY(i915)) + if (!HAS_DISPLAY(display)) return; - val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); + val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Resetting DC state tracking from %02x to %02x\n", power_domains->dc_state, val); power_domains->dc_state = val; @@ -720,7 +727,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915) /** * gen9_set_dc_state - set target display C power state - * @dev_priv: i915 device instance + * @display: display instance * @state: target DC power state * - DC_STATE_DISABLE * - DC_STATE_EN_UPTO_DC5 @@ -741,150 +748,152 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915) * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored. 
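[Aside: gen9_write_dc_state(), reworked above, defends against the DMC dropping DC_STATE_EN writes by rewriting until the register reads back the requested value several times in a row, giving up after 100 rewrites. The control flow, lifted out as a stand-alone loop over a plain memory-mapped register:]

	static int write_until_stable(volatile unsigned int *reg,
				      unsigned int state)
	{
		int rewrites = 0, rereads = 0;
		unsigned int v;

		*reg = state;
		do {
			v = *reg;
			if (v != state) {
				*reg = state;	/* didn't stick; write again */
				rewrites++;
				rereads = 0;	/* stability count restarts */
			} else if (rereads++ > 5) {
				break;		/* stable for several reads */
			}
		} while (rewrites < 100);

		return v == state ? 0 : -1;	/* persistent mismatch */
	}
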
*/ -void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) +void gen9_set_dc_state(struct intel_display *display, u32 state) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; u32 val; u32 mask; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, + if (drm_WARN_ON_ONCE(display->drm, state & ~power_domains->allowed_dc_mask)) state &= power_domains->allowed_dc_mask; - val = intel_de_read(dev_priv, DC_STATE_EN); - mask = gen9_dc_mask(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", + val = intel_de_read(display, DC_STATE_EN); + mask = gen9_dc_mask(display); + drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n", val & mask, state); /* Check if DMC is ignoring our DC state requests */ if ((val & mask) != power_domains->dc_state) - drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", + drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n", power_domains->dc_state, val & mask); val &= ~mask; val |= state; - gen9_write_dc_state(dev_priv, val); + gen9_write_dc_state(display, val); power_domains->dc_state = val & mask; } -static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_enable_dc3co(struct intel_display *display) { - drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); + drm_dbg_kms(display->drm, "Enabling DC3CO\n"); + gen9_set_dc_state(display, DC_STATE_EN_DC3CO); } -static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_disable_dc3co(struct intel_display *display) { - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + drm_dbg_kms(display->drm, "Disabling DC3CO\n"); + intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); + gen9_set_dc_state(display, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 */ usleep_range(200, 210); } -static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc5(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); enum i915_power_well_id high_pg; /* Power wells at this level and above must be disabled for DC5 entry */ - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(display) == 12) high_pg = ICL_DISP_PW_3; else high_pg = SKL_DISP_PW_2; - drm_WARN_ONCE(&dev_priv->drm, + drm_WARN_ONCE(display->drm, intel_display_power_well_is_enabled(dev_priv, high_pg), "Power wells above platform's DC5 limit still enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); assert_rpm_wakelock_held(&dev_priv->runtime_pm); - assert_dmc_loaded(dev_priv); + assert_dmc_loaded(display); } -void gen9_enable_dc5(struct drm_i915_private *dev_priv) +void gen9_enable_dc5(struct intel_display *display) { - assert_can_enable_dc5(dev_priv); + struct drm_i915_private *dev_priv = to_i915(display->drm); + + assert_can_enable_dc5(display); - drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); + drm_dbg_kms(display->drm, "Enabling DC5\n"); /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + if 
(DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv)) + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); - intel_dmc_wl_enable(&dev_priv->display); + intel_dmc_wl_enable(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); + gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5); } -static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) +static void assert_can_enable_dc6(struct intel_display *display) { - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, UTIL_PIN_CTL) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), "Utility pin enabled in PWM mode\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & + drm_WARN_ONCE(display->drm, + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), "DC6 already programmed to be enabled.\n"); - assert_dmc_loaded(dev_priv); + assert_dmc_loaded(display); } -void skl_enable_dc6(struct drm_i915_private *dev_priv) +void skl_enable_dc6(struct intel_display *display) { - assert_can_enable_dc6(dev_priv); + struct drm_i915_private *dev_priv = to_i915(display->drm); - drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); + assert_can_enable_dc6(display); + + drm_dbg_kms(display->drm, "Enabling DC6\n"); /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + if (DISPLAY_VER(display) == 9 && !IS_BROXTON(dev_priv)) + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); - intel_dmc_wl_enable(&dev_priv->display); + intel_dmc_wl_enable(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6); } -void bxt_enable_dc9(struct drm_i915_private *dev_priv) +void bxt_enable_dc9(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - assert_can_enable_dc9(dev_priv); + assert_can_enable_dc9(display); - drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); + drm_dbg_kms(display->drm, "Enabling DC9\n"); /* - * Power sequencer reset is not needed on - * platforms with South Display Engine on PCH, - * because PPS registers are always on. + * Power sequencer reset is needed on BXT/GLK, because the PPS registers + * aren't always on, unlike with South Display Engine on PCH. 
*/ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_pps_reset_all(display); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); + if (IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) + bxt_pps_reset_all(display); + gen9_set_dc_state(display, DC_STATE_EN_DC9); } -void bxt_disable_dc9(struct drm_i915_private *dev_priv) +void bxt_disable_dc9(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + assert_can_disable_dc9(display); - assert_can_disable_dc9(dev_priv); + drm_dbg_kms(display->drm, "Disabling DC9\n"); - drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); intel_pps_unlock_regs_wa(display); } @@ -910,38 +919,45 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + struct intel_display *display = &dev_priv->display; + + return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct i915_power_well *power_well; power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DISP_PW_DPIO_CMN_C); if (intel_power_well_refcount(power_well) > 0) - bxt_dpio_phy_verify_state(dev_priv, + bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); } } @@ -949,8 +965,10 @@ static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv) static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && - (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); + struct intel_display *display = &dev_priv->display; + + return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); } static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) @@ -965,27 +983,28 @@ static void 
gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) enabled_dbuf_slices); } -void gen9_disable_dc_states(struct drm_i915_private *dev_priv) +void gen9_disable_dc_states(struct intel_display *display) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct i915_power_domains *power_domains = &display->power.domains; struct intel_cdclk_config cdclk_config = {}; if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { - tgl_disable_dc3co(dev_priv); + tgl_disable_dc3co(display); return; } - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_set_dc_state(display, DC_STATE_DISABLE); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - intel_dmc_wl_disable(&dev_priv->display); + intel_dmc_wl_disable(display); - intel_cdclk_get_cdclk(dev_priv, &cdclk_config); + intel_cdclk_get_cdclk(display, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ - drm_WARN_ON(&dev_priv->drm, - intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw, + drm_WARN_ON(display->drm, + intel_cdclk_clock_changed(&display->cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); @@ -993,7 +1012,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_verify_dpio_phy_power_wells(dev_priv); - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) /* * DMC retains HW context only for port A, the other combo * PHY's HW context for port B is lost after DC transitions, @@ -1005,26 +1024,29 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - gen9_disable_dc_states(dev_priv); + struct intel_display *display = &dev_priv->display; + + gen9_disable_dc_states(display); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; - if (!intel_dmc_has_payload(dev_priv)) + if (!intel_dmc_has_payload(display)) return; switch (power_domains->target_dc_state) { case DC_STATE_EN_DC3CO: - tgl_enable_dc3co(dev_priv); + tgl_enable_dc3co(display); break; case DC_STATE_EN_UPTO_DC6: - skl_enable_dc6(dev_priv); + skl_enable_dc6(display); break; case DC_STATE_EN_UPTO_DC5: - gen9_enable_dc5(dev_priv); + gen9_enable_dc5(display); break; } } @@ -1048,24 +1070,30 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_B); + struct intel_display *display = &dev_priv->display; + + if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0) + i830_enable_pipe(display, PIPE_A); + if ((intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0) + i830_enable_pipe(display, PIPE_B); } static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - 
i830_disable_pipe(dev_priv, PIPE_B); - i830_disable_pipe(dev_priv, PIPE_A); + struct intel_display *display = &dev_priv->display; + + i830_disable_pipe(display, PIPE_B); + i830_disable_pipe(display, PIPE_A); } static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE && - intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; + struct intel_display *display = &dev_priv->display; + + return intel_de_read(display, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE && + intel_de_read(display, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1232,7 +1260,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_crt_reset(&encoder->base); } - intel_vga_redisable_power_on(dev_priv); + intel_vga_redisable_power_on(display); intel_pps_unlock_regs_wa(display); } @@ -1248,7 +1276,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) /* make sure we're done processing display irqs */ intel_synchronize_irq(dev_priv); - intel_pps_reset_all(display); + vlv_pps_reset_all(display); /* Prevent us from re-enabling polling on accident in late suspend */ if (!dev_priv->drm.dev->power.is_suspended) @@ -1309,13 +1337,14 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) -static void assert_chv_phy_status(struct drm_i915_private *dev_priv) +static void assert_chv_phy_status(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_well *cmn_bc = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->display.power.chv_phy_control; + u32 phy_control = display->power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1326,7 +1355,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * reset (ie. the power well has been disabled at * least once). 
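[Aside: BITS_SET() defined above is an all-bits test, not an any-bit test, which is what the PHY status cross-check needs; a lane only counts as powered if every override bit for it is set. The macro is verbatim from the hunk, the demo values are illustrative:]

	#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

	static int bits_set_demo(void)
	{
		int all = BITS_SET(0xE, 0x6);	/* (0xE & 0x6) == 0x6 -> 1 */
		int part = BITS_SET(0xA, 0x6);	/* (0xA & 0x6) == 0x2 -> 0 */

		return all && !part;	/* 1: the macro demands all bits */
	}
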
*/ - if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) + if (!display->power.chv_phy_assert[DPIO_PHY0]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | @@ -1334,7 +1363,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) + if (!display->power.chv_phy_assert[DPIO_PHY1]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); @@ -1362,7 +1391,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) */ if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && - (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & DPLL_VCO_ENABLE) == 0) + (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); if (BITS_SET(phy_control, @@ -1405,12 +1434,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. */ - if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS, + if (intel_de_wait(display, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, 10)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->display.power.chv_phy_control); + intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, display->power.chv_phy_control); } #undef BITS_SET @@ -1418,11 +1447,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct intel_display *display = &dev_priv->display; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; u32 tmp; - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); @@ -1436,9 +1466,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, + if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), 1)) - drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", + drm_err(display->drm, "Display PHY %d is not power up\n", phy); vlv_dpio_get(dev_priv); @@ -1466,24 +1496,25 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_put(dev_priv); - dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->display.power.chv_phy_control); + phy, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); } static void 
chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct intel_display *display = &dev_priv->display; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; - drm_WARN_ON_ONCE(&dev_priv->drm, + drm_WARN_ON_ONCE(display->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); @@ -1496,20 +1527,20 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, PIPE_C); } - dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->display.power.chv_phy_control); + phy, display->power.chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->display.power.chv_phy_assert[phy] = true; + display->power.chv_phy_assert[phy] = true; - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); } static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, @@ -1579,29 +1610,30 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override) { - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct intel_display *display = &dev_priv->display; + struct i915_power_domains *power_domains = &display->power.domains; bool was_override; mutex_lock(&power_domains->lock); - was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); if (override == was_override) goto out; if (override) - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->display.power.chv_phy_control); + phy, ch, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); out: mutex_unlock(&power_domains->lock); @@ -1612,29 +1644,30 @@ out: void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + struct i915_power_domains *power_domains = &display->power.domains; enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); - 
dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); if (override) - dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->display.power.chv_phy_control); + intel_de_write(display, DISPLAY_PHY_CONTROL, + display->power.chv_phy_control); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->display.power.chv_phy_control); + phy, ch, mask, display->power.chv_phy_control); - assert_chv_phy_status(dev_priv); + assert_chv_phy_status(display); assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 9357a9a73c06..93559f7c6100 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -12,6 +12,7 @@ struct drm_i915_private; struct i915_power_well_ops; +struct intel_display; struct intel_encoder; #define for_each_power_well(__dev_priv, __power_well) \ @@ -154,13 +155,13 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override); -void gen9_enable_dc5(struct drm_i915_private *dev_priv); -void skl_enable_dc6(struct drm_i915_private *dev_priv); -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); -void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state); -void gen9_disable_dc_states(struct drm_i915_private *dev_priv); -void bxt_enable_dc9(struct drm_i915_private *dev_priv); -void bxt_disable_dc9(struct drm_i915_private *dev_priv); +void gen9_enable_dc5(struct intel_display *display); +void skl_enable_dc6(struct intel_display *display); +void gen9_sanitize_dc_state(struct intel_display *display); +void gen9_set_dc_state(struct intel_display *display, u32 state); +void gen9_disable_dc_states(struct intel_display *display); +void bxt_enable_dc9(struct intel_display *display); +void bxt_disable_dc9(struct intel_display *display); extern const struct i915_power_well_ops i9xx_always_on_power_well_ops; extern const struct i915_power_well_ops chv_pipe_power_well_ops; diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c new file mode 100644 index 000000000000..030c4f873da1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <linux/slab.h> + +#include "i915_drv.h" +#include "intel_display_device.h" +#include "intel_display_params.h" +#include "intel_display_snapshot.h" +#include "intel_dmc.h" +#include "intel_overlay.h" + +struct intel_display_snapshot { + struct intel_display *display; + + struct intel_display_device_info info; + struct intel_display_runtime_info runtime_info; + struct 
intel_display_params params; + struct intel_overlay_snapshot *overlay; + struct intel_dmc_snapshot *dmc; +}; + +struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display) +{ + struct intel_display_snapshot *snapshot; + + snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC); + if (!snapshot) + return NULL; + + snapshot->display = display; + + memcpy(&snapshot->info, DISPLAY_INFO(display), sizeof(snapshot->info)); + memcpy(&snapshot->runtime_info, DISPLAY_RUNTIME_INFO(display), + sizeof(snapshot->runtime_info)); + + intel_display_params_copy(&snapshot->params); + + snapshot->overlay = intel_overlay_snapshot_capture(display); + snapshot->dmc = intel_dmc_snapshot_capture(display); + + return snapshot; +} + +void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot, + struct drm_printer *p) +{ + struct intel_display *display; + + if (!snapshot) + return; + + display = snapshot->display; + + intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p); + intel_display_params_dump(&snapshot->params, display->drm->driver->name, p); + + intel_overlay_snapshot_print(snapshot->overlay, p); + intel_dmc_snapshot_print(snapshot->dmc, p); +} + +void intel_display_snapshot_free(struct intel_display_snapshot *snapshot) +{ + if (!snapshot) + return; + + intel_display_params_free(&snapshot->params); + + kfree(snapshot->overlay); + kfree(snapshot->dmc); + kfree(snapshot); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.h b/drivers/gpu/drm/i915/display/intel_display_snapshot.h new file mode 100644 index 000000000000..7ed27cdea644 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_SNAPSHOT_H__ +#define __INTEL_DISPLAY_SNAPSHOT_H__ + +struct drm_printer; +struct intel_display; +struct intel_display_snapshot; + +struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display); +void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot, + struct drm_printer *p); +void intel_display_snapshot_free(struct intel_display_snapshot *snapshot); + +#endif /* __INTEL_DISPLAY_SNAPSHOT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index c734ef1fba3c..9bd8f1e505b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -9,44 +9,85 @@ #if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) #define __INTEL_DISPLAY_TRACE_H__ +#include <linux/string.h> #include <linux/string_helpers.h> #include <linux/types.h> #include <linux/tracepoint.h> #include "i915_drv.h" #include "intel_crtc.h" +#include "intel_display_limits.h" #include "intel_display_types.h" #include "intel_vblank.h" -#define __dev_name_i915(i915) dev_name((i915)->drm.dev) +#define __dev_name_display(display) dev_name((display)->drm->dev) #define __dev_name_kms(obj) dev_name((obj)->base.dev->dev) +/* + * Using identifiers from enum pipe in TP_printk() will confuse tools that + * parse /sys/kernel/debug/tracing/{xe,i915}/<event>/format. So we use CPP + * macros instead. 
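
The comment above explains why the tracepoints below stop storing enum pipe: tools that parse the tracefs format files cannot resolve enum identifiers, so the format strings use plain CPP constants and the pipe is pre-resolved to a char at assign time. A minimal user-space sketch of that pattern, assuming the kernel's pipe_name() maps PIPE_A..PIPE_D to 'A'..'D' (which is what the static_assert()s rely on):

#include <assert.h>
#include <stdio.h>

/*
 * Mirror of enum pipe. Trace format parsers cannot resolve enum
 * identifiers, which is why TP_printk() uses plain CPP constants
 * and a pre-resolved char instead of the enum.
 */
enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, I915_MAX_PIPES };

#define _TRACE_PIPE_A 0
#define _TRACE_PIPE_D 3

/* The same paranoia checks the header adds. */
static_assert(PIPE_A == _TRACE_PIPE_A, "trace constants must track enum pipe");
static_assert(I915_MAX_PIPES - 1 == _TRACE_PIPE_D, "format must cover all pipes");

/* Assumed shape of the kernel's pipe_name(): resolve once, at assign time. */
static char pipe_name(enum pipe pipe)
{
	return 'A' + pipe;
}

int main(void)
{
	char name = pipe_name(PIPE_C);	/* what __entry->pipe_name stores */

	printf("pipe %c enable\n", name);	/* TP_printk() only sees %c */
	return 0;
}
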
+ */ +#define _TRACE_PIPE_A 0 +#define _TRACE_PIPE_B 1 +#define _TRACE_PIPE_C 2 +#define _TRACE_PIPE_D 3 + +/* + * FIXME: Several TP_printk() calls below display frame and scanline numbers for + * all possible pipes (regardless of whether they are available) and that is + * done with a constant format string. A better approach would be to generate + * that info dynamically based on available pipes, but, while we do not have + * that implemented yet, let's assert that the constant format string indeed + * covers all possible pipes. + */ +static_assert(I915_MAX_PIPES - 1 == _TRACE_PIPE_D); + +#define _PIPES_FRAME_AND_SCANLINE_FMT \ + "pipe A: frame=%u, scanline=%u" \ + ", pipe B: frame=%u, scanline=%u" \ + ", pipe C: frame=%u, scanline=%u" \ + ", pipe D: frame=%u, scanline=%u" + +#define _PIPES_FRAME_AND_SCANLINE_VALUES \ + __entry->frame[_TRACE_PIPE_A], __entry->scanline[_TRACE_PIPE_A] \ + , __entry->frame[_TRACE_PIPE_B], __entry->scanline[_TRACE_PIPE_B] \ + , __entry->frame[_TRACE_PIPE_C], __entry->scanline[_TRACE_PIPE_C] \ + , __entry->frame[_TRACE_PIPE_D], __entry->scanline[_TRACE_PIPE_D] + +/* + * Paranoid sanity check that at least the enumeration starts at the + * same value as _TRACE_PIPE_A. + */ +static_assert(PIPE_A == _TRACE_PIPE_A); + TRACE_EVENT(intel_pipe_enable, TP_PROTO(struct intel_crtc *crtc), TP_ARGS(crtc), TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) + __field(char, pipe_name) ), TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc *it__; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, it__) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); ), - TP_printk("dev %s, pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + TP_printk("dev %s, pipe %c enable, " _PIPES_FRAME_AND_SCANLINE_FMT, + __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(intel_pipe_disable, @@ -55,27 +96,28 @@ TRACE_EVENT(intel_pipe_disable, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) + __field(char, pipe_name) ), TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc *it__; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, it__) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] 
= intel_get_crtc_scanline(it__); } - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); ), - TP_printk("dev %s, pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + TP_printk("dev %s, pipe %c disable, " _PIPES_FRAME_AND_SCANLINE_FMT, + __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(intel_crtc_flip_done, @@ -84,20 +126,20 @@ TRACE_EVENT(intel_crtc_flip_done, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -107,7 +149,7 @@ TRACE_EVENT(intel_pipe_crc, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(u32, crcs, 5) @@ -115,14 +157,14 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], __entry->crcs[3], @@ -130,62 +172,62 @@ TRACE_EVENT(intel_pipe_crc, ); TRACE_EVENT(intel_cpu_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_display *display, enum pipe pipe), + TP_ARGS(display, pipe), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __field(enum pipe, pipe) + __string(dev, __dev_name_display(display)) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); __assign_str(dev); - __entry->pipe = pipe; + __entry->pipe_name = pipe_name(pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), - TP_ARGS(dev_priv, pch_transcoder), + TP_PROTO(struct intel_display *display, enum pipe pch_transcoder), + TP_ARGS(display, pch_transcoder), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __field(enum pipe, pipe) + __string(dev, __dev_name_display(display)) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( enum pipe pipe = pch_transcoder; - struct intel_crtc *crtc = 
intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); __assign_str(dev); - __entry->pipe = pipe; + __entry->pipe_name = pipe_name(pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pch transcoder %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_memory_cxsr, - TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), - TP_ARGS(dev_priv, old, new), + TP_PROTO(struct intel_display *display, bool old, bool new), + TP_ARGS(display, old, new), TP_STRUCT__entry( - __string(dev, __dev_name_i915(dev_priv)) - __array(u32, frame, 3) - __array(u32, scanline, 3) + __string(dev, __dev_name_display(display)) + __array(u32, frame, I915_MAX_PIPES) + __array(u32, scanline, I915_MAX_PIPES) __field(bool, old) __field(bool, new) ), @@ -193,7 +235,11 @@ TRACE_EVENT(intel_memory_cxsr, TP_fast_assign( struct intel_crtc *crtc; __assign_str(dev); - for_each_intel_crtc(&dev_priv->drm, crtc) { + memset(__entry->frame, 0, + sizeof(__entry->frame[0]) * I915_MAX_PIPES); + memset(__entry->scanline, 0, + sizeof(__entry->scanline[0]) * I915_MAX_PIPES); + for_each_intel_crtc(display->drm, crtc) { __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } @@ -201,11 +247,9 @@ TRACE_EVENT(intel_memory_cxsr, __entry->new = new; ), - TP_printk("dev %s, cxsr %s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + TP_printk("dev %s, cxsr %s->%s, " _PIPES_FRAME_AND_SCANLINE_FMT, __get_str(dev), str_on_off(__entry->old), str_on_off(__entry->new), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) + _PIPES_FRAME_AND_SCANLINE_VALUES) ); TRACE_EVENT(g4x_wm, @@ -214,7 +258,7 @@ TRACE_EVENT(g4x_wm, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u16, primary) @@ -233,7 +277,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; @@ -251,7 +295,7 @@ TRACE_EVENT(g4x_wm, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->primary, __entry->sprite, __entry->cursor, str_yes_no(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, @@ -265,7 +309,7 @@ TRACE_EVENT(vlv_wm, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, level) @@ -280,7 +324,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; @@ -294,7 +338,7 @@ TRACE_EVENT(vlv_wm, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, level=%d, 
cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->level, __entry->cxsr, __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, @@ -307,7 +351,7 @@ TRACE_EVENT(vlv_fifo_size, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, sprite0_start) @@ -317,7 +361,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; @@ -326,7 +370,7 @@ TRACE_EVENT(vlv_fifo_size, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, %d/%d/%d", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->sprite0_start, __entry->sprite1_start, __entry->fifo_size) ); @@ -337,7 +381,7 @@ TRACE_EVENT(intel_plane_async_flip, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(bool, async_flip) @@ -347,14 +391,14 @@ TRACE_EVENT(intel_plane_async_flip, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->async_flip = async_flip; ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, str_yes_no(__entry->async_flip)) ); @@ -364,7 +408,7 @@ TRACE_EVENT(intel_plane_update_noarm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) @@ -375,7 +419,7 @@ TRACE_EVENT(intel_plane_update_noarm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src)); @@ -383,7 +427,7 @@ TRACE_EVENT(intel_plane_update_noarm, ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -395,7 +439,7 @@ TRACE_EVENT(intel_plane_update_arm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) @@ -406,7 +450,7 @@ TRACE_EVENT(intel_plane_update_arm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src)); @@ -414,7 +458,7 @@ 
TRACE_EVENT(intel_plane_update_arm, ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -426,7 +470,7 @@ TRACE_EVENT(intel_plane_disable_arm, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __string(name, plane->base.name) @@ -435,13 +479,13 @@ TRACE_EVENT(intel_plane_disable_arm, TP_fast_assign( __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -452,23 +496,24 @@ TRACE_EVENT(intel_fbc_activate, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -479,23 +524,24 @@ TRACE_EVENT(intel_fbc_deactivate, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -506,23 +552,24 @@ TRACE_EVENT(intel_fbc_nuke, TP_STRUCT__entry( __string(dev, __dev_name_kms(plane)) __string(name, plane->base.name) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + struct intel_display *display = to_intel_display(plane->base.dev); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); __assign_str(dev); __assign_str(name); - __entry->pipe = crtc->pipe; + __entry->pipe_name = 
pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), __get_str(name), + __get_str(dev), __entry->pipe_name, __get_str(name), __entry->frame, __entry->scanline) ); @@ -532,20 +579,20 @@ TRACE_EVENT(intel_crtc_vblank_work_start, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -555,20 +602,20 @@ TRACE_EVENT(intel_crtc_vblank_work_end, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); @@ -578,7 +625,7 @@ TRACE_EVENT(intel_pipe_update_start, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, min) @@ -587,7 +634,7 @@ TRACE_EVENT(intel_pipe_update_start, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; @@ -595,7 +642,7 @@ TRACE_EVENT(intel_pipe_update_start, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->min, __entry->max) ); @@ -606,7 +653,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) __field(u32, min) @@ -615,7 +662,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = crtc->debug.start_vbl_count; __entry->scanline = crtc->debug.scanline_start; __entry->min = crtc->debug.min_vbl; @@ -623,7 +670,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline, __entry->min, __entry->max) ); @@ -634,30 +681,30 @@ TRACE_EVENT(intel_pipe_update_end, TP_STRUCT__entry( __string(dev, __dev_name_kms(crtc)) - __field(enum pipe, pipe) + __field(char, pipe_name) __field(u32, frame) __field(u32, scanline) ), TP_fast_assign( __assign_str(dev); - __entry->pipe = crtc->pipe; + __entry->pipe_name = pipe_name(crtc->pipe); __entry->frame = frame; 
__entry->scanline = scanline_end; ), TP_printk("dev %s, pipe %c, frame=%u, scanline=%u", - __get_str(dev), pipe_name(__entry->pipe), + __get_str(dev), __entry->pipe_name, __entry->frame, __entry->scanline) ); TRACE_EVENT(intel_frontbuffer_invalidate, - TP_PROTO(struct drm_i915_private *i915, + TP_PROTO(struct intel_display *display, unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(i915, frontbuffer_bits, origin), + TP_ARGS(display, frontbuffer_bits, origin), TP_STRUCT__entry( - __string(dev, __dev_name_i915(i915)) + __string(dev, __dev_name_display(display)) __field(unsigned int, frontbuffer_bits) __field(unsigned int, origin) ), @@ -673,12 +720,12 @@ TRACE_EVENT(intel_frontbuffer_invalidate, ); TRACE_EVENT(intel_frontbuffer_flush, - TP_PROTO(struct drm_i915_private *i915, + TP_PROTO(struct intel_display *display, unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(i915, frontbuffer_bits, origin), + TP_ARGS(display, frontbuffer_bits, origin), TP_STRUCT__entry( - __string(dev, __dev_name_i915(i915)) + __string(dev, __dev_name_display(display)) __field(unsigned int, frontbuffer_bits) __field(unsigned int, origin) ), diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index f29e5dc3db91..ff6eb93337e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -26,10 +26,8 @@ #ifndef __INTEL_DISPLAY_TYPES_H__ #define __INTEL_DISPLAY_TYPES_H__ -#include <linux/i2c.h> #include <linux/pm_qos.h> #include <linux/pwm.h> -#include <linux/sched/clock.h> #include <drm/display/drm_dp_dual_mode_helper.h> #include <drm/display/drm_dp_mst_helper.h> @@ -38,16 +36,11 @@ #include <drm/drm_atomic.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> -#include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> -#include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> -#include <drm/drm_vblank.h> #include <drm/drm_vblank_work.h> #include <drm/intel/i915_hdcp_interface.h> -#include <media/cec-notifier.h> -#include "gem/i915_gem_object_types.h" /* for to_intel_bo() */ #include "i915_vma.h" #include "i915_vma_types.h" #include "intel_bios.h" @@ -57,11 +50,13 @@ #include "intel_dpll_mgr.h" #include "intel_wm_types.h" +struct cec_notifier; struct drm_printer; struct __intel_global_objs_state; +struct intel_connector; struct intel_ddi_buf_trans; struct intel_fbc; -struct intel_connector; +struct intel_hdcp_shim; struct intel_tc_port; /* @@ -430,128 +425,6 @@ struct intel_panel { struct intel_digital_port; -enum check_link_response { - HDCP_LINK_PROTECTED = 0, - HDCP_TOPOLOGY_CHANGE, - HDCP_LINK_INTEGRITY_FAILURE, - HDCP_REAUTH_REQUEST -}; - -/* - * This structure serves as a translation layer between the generic HDCP code - * and the bus-specific code. What that means is that HDCP over HDMI differs - * from HDCP over DP, so to account for these differences, we need to - * communicate with the receiver through this shim. - * - * For completeness, the 2 buses differ in the following ways: - * - DP AUX vs. DDC - * HDCP registers on the receiver are set via DP AUX for DP, and - * they are set via DDC for HDMI. - * - Receiver register offsets - * The offsets of the registers are different for DP vs. HDMI - * - Receiver register masks/offsets - * For instance, the ready bit for the KSV fifo is in a different - * place on DP vs HDMI - * - Receiver register names - * Seriously. 
In the DP spec, the 16-bit register containing - * downstream information is called BINFO, on HDMI it's called - * BSTATUS. To confuse matters further, DP has a BSTATUS register - * with a completely different definition. - * - KSV FIFO - * On HDMI, the ksv fifo is read all at once, whereas on DP it must - * be read 3 keys at a time - * - Aksv output - * Since Aksv is hidden in hardware, there's different procedures - * to send it over DP AUX vs DDC - */ -struct intel_hdcp_shim { - /* Outputs the transmitter's An and Aksv values to the receiver. */ - int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an); - - /* Reads the receiver's key selection vector */ - int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv); - - /* - * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The - * definitions are the same in the respective specs, but the names are - * different. Call it BSTATUS since that's the name the HDMI spec - * uses and it was there first. - */ - int (*read_bstatus)(struct intel_digital_port *dig_port, - u8 *bstatus); - - /* Determines whether a repeater is present downstream */ - int (*repeater_present)(struct intel_digital_port *dig_port, - bool *repeater_present); - - /* Reads the receiver's Ri' value */ - int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri); - - /* Determines if the receiver's KSV FIFO is ready for consumption */ - int (*read_ksv_ready)(struct intel_digital_port *dig_port, - bool *ksv_ready); - - /* Reads the ksv fifo for num_downstream devices */ - int (*read_ksv_fifo)(struct intel_digital_port *dig_port, - int num_downstream, u8 *ksv_fifo); - - /* Reads a 32-bit part of V' from the receiver */ - int (*read_v_prime_part)(struct intel_digital_port *dig_port, - int i, u32 *part); - - /* Enables HDCP signalling on the port */ - int (*toggle_signalling)(struct intel_digital_port *dig_port, - enum transcoder cpu_transcoder, - bool enable); - - /* Enable/Disable stream encryption on DP MST Transport Link */ - int (*stream_encryption)(struct intel_connector *connector, - bool enable); - - /* Ensures the link is still protected */ - bool (*check_link)(struct intel_digital_port *dig_port, - struct intel_connector *connector); - - /* Detects panel's hdcp capability. This is optional for HDMI. */ - int (*hdcp_get_capability)(struct intel_digital_port *dig_port, - bool *hdcp_capable); - - /* HDCP adaptation(DP/HDMI) required on the port */ - enum hdcp_wired_protocol protocol; - - /* Detects whether sink is HDCP2.2 capable */ - int (*hdcp_2_2_get_capability)(struct intel_connector *connector, - bool *capable); - - /* Write HDCP2.2 messages */ - int (*write_2_2_msg)(struct intel_connector *connector, - void *buf, size_t size); - - /* Read HDCP2.2 messages */ - int (*read_2_2_msg)(struct intel_connector *connector, - u8 msg_id, void *buf, size_t size); - - /* - * Implementation of DP HDCP2.2 Errata for the communication of stream - * type to Receivers. In DP HDCP2.2 Stream type is one of the input to - * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. 
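
The comment block being removed here (it moves out of intel_display_types.h together with struct intel_hdcp_shim) describes the shim as a translation layer between the generic HDCP code and the DP-AUX/DDC bus specifics. A reduced, hypothetical sketch of that ops-table pattern, with made-up names rather than the driver's real API:

#include <stdio.h>

/*
 * Hypothetical reduction of the shim idea: one generic HDCP flow,
 * two bus-specific implementations behind an ops table.
 */
struct hdcp_shim {
	/*
	 * DP reads BINFO over AUX, HDMI reads BSTATUS over DDC; the
	 * generic code only ever sees "read_bstatus".
	 */
	int (*read_bstatus)(unsigned char bstatus[2]);
};

static int dp_read_bstatus(unsigned char bstatus[2])
{
	bstatus[0] = 0x11;	/* stand-in for a DP AUX transfer */
	bstatus[1] = 0x00;
	return 0;
}

static int hdmi_read_bstatus(unsigned char bstatus[2])
{
	bstatus[0] = 0x22;	/* stand-in for an HDMI DDC transfer */
	bstatus[1] = 0x00;
	return 0;
}

static const struct hdcp_shim dp_shim = { .read_bstatus = dp_read_bstatus };
static const struct hdcp_shim hdmi_shim = { .read_bstatus = hdmi_read_bstatus };

/* Generic code stays bus-agnostic: it always calls through the shim. */
static int check_downstream(const struct hdcp_shim *shim)
{
	unsigned char bstatus[2];
	int ret = shim->read_bstatus(bstatus);

	if (ret == 0)
		printf("bstatus: %02x %02x\n", bstatus[0], bstatus[1]);
	return ret;
}

int main(void)
{
	return check_downstream(&dp_shim) | check_downstream(&hdmi_shim);
}
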
- */ - int (*config_stream_type)(struct intel_connector *connector, - bool is_repeater, u8 type); - - /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */ - int (*stream_2_2_encryption)(struct intel_connector *connector, - bool enable); - - /* HDCP2.2 Link Integrity Check */ - int (*check_2_2_link)(struct intel_digital_port *dig_port, - struct intel_connector *connector); - - /* HDCP remote sink cap */ - int (*get_remote_hdcp_capability)(struct intel_connector *connector, - bool *hdcp_capable, bool *hdcp2_capable); -}; - struct intel_hdcp { const struct intel_hdcp_shim *shim; /* Mutex for hdcp state of the connector */ @@ -651,7 +524,7 @@ struct intel_connector { struct intel_dp *mst_port; - bool force_bigjoiner_enable; + int force_joined_pipes; struct { struct drm_dp_aux *dsc_decompression_aux; @@ -1036,6 +909,10 @@ struct intel_csc_matrix { u16 postoff[3]; }; +void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val); + +typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val); + struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1270,9 +1147,6 @@ struct intel_crtc_state { /* w/a for waiting 2 vblanks during crtc enable */ enum pipe hsw_workaround_pipe; - /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ - bool disable_lp_wm; - struct intel_crtc_wm_state wm; int min_cdclk[I915_MAX_PLANES]; @@ -1396,8 +1270,9 @@ struct intel_crtc_state { /* Only valid on TGL+ */ enum transcoder mst_master_transcoder; - /* For DSB based color LUT updates */ - struct intel_dsb *dsb_color_vblank, *dsb_color_commit; + /* For DSB based pipe updates */ + struct intel_dsb *dsb_color_vblank, *dsb_commit; + bool use_dsb; u32 psr2_man_track_ctl; @@ -1488,6 +1363,8 @@ struct intel_crtc { /* armed event for async flip */ struct drm_pending_vblank_event *flip_done_event; + /* armed event for DSB based updates */ + struct drm_pending_vblank_event *dsb_event; /* Access to these should be protected by dev_priv->irq_lock. 
*/ bool cpu_fifo_underrun_disabled; @@ -1540,6 +1417,8 @@ struct intel_crtc { #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif + + bool block_dc_for_vblank; }; struct intel_plane { @@ -1578,22 +1457,26 @@ struct intel_plane { u32 pixel_format, u64 modifier, unsigned int rotation); /* Write all non-self arming plane registers */ - void (*update_noarm)(struct intel_plane *plane, + void (*update_noarm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Write all self-arming plane registers */ - void (*update_arm)(struct intel_plane *plane, + void (*update_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Disable the plane, must arm */ - void (*disable_arm)(struct intel_plane *plane, + void (*disable_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); int (*min_cdclk)(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*async_flip)(struct intel_plane *plane, + void (*async_flip)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); @@ -1601,14 +1484,6 @@ struct intel_plane { void (*disable_flip_done)(struct intel_plane *plane); }; -struct intel_watermark_params { - u16 fifo_size; - u16 max_wm; - u8 default_wm; - u8 guard_size; - u8 cacheline_size; -}; - #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) @@ -1622,8 +1497,6 @@ struct intel_watermark_params { #define to_intel_framebuffer(fb) \ container_of_const((fb), struct intel_framebuffer, base) -#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL) - struct intel_hdmi { i915_reg_t hdmi_reg; struct { @@ -1676,7 +1549,7 @@ struct intel_hdmi { * Pipe whose power sequencer is currently locked into * this port. Only relevant on VLV/CHV. */ - enum pipe pps_pipe; + enum pipe vlv_pps_pipe; /* * Power sequencer index. Only relevant on BXT+. @@ -1689,12 +1562,12 @@ struct intel_pps { * the use of the PPS for any pipe currently driving * external DP as that will mess things up on VLV. */ - enum pipe active_pipe; + enum pipe vlv_active_pipe; /* * Set if the sequencer may be reset due to a power transition, * requiring a reinitialization. Only relevant on BXT+. 
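
The intel_plane hooks in the hunk above all gain a leading struct intel_dsb * argument, so one callback body can either write its registers immediately over MMIO or queue the same writes into a DSB batch that the display hardware replays later. A sketch of that dual-path convention, with stand-in types only:

#include <stdio.h>

/*
 * Stand-in type. The convention modeled here: a NULL dsb means "write
 * the register now over MMIO"; a non-NULL dsb means "queue the write
 * into a batch the display hardware replays later".
 */
struct intel_dsb { int nqueued; };

static void reg_write(struct intel_dsb *dsb, unsigned int reg, unsigned int val)
{
	if (dsb) {
		dsb->nqueued++;	/* batched; armed at commit time */
		printf("dsb:  queue %#x = %#x\n", reg, val);
	} else {
		printf("mmio: write %#x = %#x\n", reg, val);
	}
}

/* Shape of the reworked hook: one body serves both paths. */
static void plane_update_arm(struct intel_dsb *dsb)
{
	reg_write(dsb, 0x70180, 0x1);	/* hypothetical plane control reg */
}

int main(void)
{
	struct intel_dsb dsb = { 0 };

	plane_update_arm(NULL);		/* immediate MMIO path */
	plane_update_arm(&dsb);		/* DSB batched path */
	printf("queued: %d\n", dsb.nqueued);
	return 0;
}
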
*/ - bool pps_reset; + bool bxt_pps_reset; struct edp_power_seq pps_delays; struct edp_power_seq bios_pps_delays; }; @@ -1745,6 +1618,8 @@ struct intel_psr { u32 dc3co_exit_delay; struct delayed_work dc3co_work; u8 entry_setup_frames; + + bool link_ok; }; struct intel_dp { @@ -1892,6 +1767,7 @@ struct intel_dp { /* When we last wrote the OUI for eDP */ unsigned long last_oui_write; + bool oui_valid; bool colorimetry_support; @@ -2050,7 +1926,10 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector) { - return enc_to_intel_dp(intel_attached_encoder(connector)); + if (connector->mst_port) + return connector->mst_port; + else + return enc_to_intel_dp(intel_attached_encoder(connector)); } static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) @@ -2228,6 +2107,10 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) __drm_device_to_intel_display((p)->base.dev) #define __intel_hdmi_to_intel_display(p) \ __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev) +#define __intel_plane_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) +#define __intel_plane_state_to_intel_display(p) \ + __drm_device_to_intel_display((p)->uapi.plane->dev) /* Helper for generic association. Map types to conversion functions/macros. */ #define __assoc(type, p) \ @@ -2246,6 +2129,8 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) __assoc(intel_digital_port, p), \ __assoc(intel_dp, p), \ __assoc(intel_encoder, p), \ - __assoc(intel_hdmi, p)) + __assoc(intel_hdmi, p), \ + __assoc(intel_plane, p), \ + __assoc(intel_plane_state, p)) #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 7c756d5ba2a2..87bdacfd9edf 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -52,7 +52,7 @@ enum intel_dmc_id { }; struct intel_dmc { - struct drm_i915_private *i915; + struct intel_display *display; struct work_struct work; const char *fw_path; u32 max_fw_size; /* bytes */ @@ -70,21 +70,21 @@ struct intel_dmc { }; /* Note: This may be NULL. */ -static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) +static struct intel_dmc *display_to_dmc(struct intel_display *display) { - return i915->display.dmc.dmc; + return display->dmc.dmc; } -static const char *dmc_firmware_param(struct drm_i915_private *i915) +static const char *dmc_firmware_param(struct intel_display *display) { - const char *p = i915->display.params.dmc_firmware_path; + const char *p = display->params.dmc_firmware_path; return p && *p ? 
p : NULL; } -static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) +static bool dmc_firmware_param_disabled(struct intel_display *display) { - const char *p = dmc_firmware_param(i915); + const char *p = dmc_firmware_param(display); /* Magic path to indicate disabled */ return p && !strcmp(p, "/dev/null"); @@ -113,6 +113,9 @@ static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd) +MODULE_FIRMWARE(XE3LPD_DMC_PATH); + #define XE2LPD_DMC_PATH DMC_PATH(xe2lpd) MODULE_FIRMWARE(XE2LPD_DMC_PATH); @@ -162,18 +165,22 @@ MODULE_FIRMWARE(SKL_DMC_PATH); #define BXT_DMC_MAX_FW_SIZE 0x3000 MODULE_FIRMWARE(BXT_DMC_PATH); -static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size) +static const char *dmc_firmware_default(struct intel_display *display, u32 *size) { + struct drm_i915_private *i915 = to_i915(display->drm); const char *fw_path = NULL; u32 max_fw_size = 0; - if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) { + if (DISPLAY_VERx100(display) == 3000) { + fw_path = XE3LPD_DMC_PATH; + max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VERx100(display) == 2000) { fw_path = XE2LPD_DMC_PATH; max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) { + } else if (DISPLAY_VERx100(display) == 1401) { fw_path = BMG_DMC_PATH; max_fw_size = XELPDP_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { + } else if (DISPLAY_VERx100(display) == 1400) { fw_path = MTL_DMC_PATH; max_fw_size = XELPDP_DMC_MAX_FW_SIZE; } else if (IS_DG2(i915)) { @@ -194,7 +201,7 @@ static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size } else if (IS_TIGERLAKE(i915)) { fw_path = TGL_DMC_PATH; max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER(i915) == 11) { + } else if (DISPLAY_VER(display) == 11) { fw_path = ICL_DMC_PATH; max_fw_size = ICL_DMC_MAX_FW_SIZE; } else if (IS_GEMINILAKE(i915)) { @@ -375,70 +382,70 @@ static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; } -static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) +static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); return dmc && dmc->dmc_info[dmc_id].payload; } -bool intel_dmc_has_payload(struct drm_i915_private *i915) +bool intel_dmc_has_payload(struct intel_display *display) { - return has_dmc_id_fw(i915, DMC_FW_MAIN); + return has_dmc_id_fw(display, DMC_FW_MAIN); } static const struct stepping_info * -intel_get_stepping_info(struct drm_i915_private *i915, +intel_get_stepping_info(struct intel_display *display, struct stepping_info *si) { - const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915)); + const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(display)); si->stepping = step_name[0]; si->substepping = step_name[1]; return si; } -static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) +static void gen9_set_dc_state_debugmask(struct intel_display *display) { /* The below bit doesn't need to be cleared ever afterwards */ - intel_de_rmw(i915, DC_STATE_DEBUG, 0, + intel_de_rmw(display, DC_STATE_DEBUG, 0, DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); - intel_de_posting_read(i915, DC_STATE_DEBUG); + 
intel_de_posting_read(display, DC_STATE_DEBUG); } -static void disable_event_handler(struct drm_i915_private *i915, +static void disable_event_handler(struct intel_display *display, i915_reg_t ctl_reg, i915_reg_t htp_reg) { - intel_de_write(i915, ctl_reg, + intel_de_write(display, ctl_reg, REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVT_CTL_EVENT_ID_FALSE)); - intel_de_write(i915, htp_reg, 0); + intel_de_write(display, htp_reg, 0); } -static void disable_all_event_handlers(struct drm_i915_private *i915) +static void disable_all_event_handlers(struct intel_display *display) { enum intel_dmc_id dmc_id; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) return; for_each_dmc_id(dmc_id) { int handler; - if (!has_dmc_id_fw(i915, dmc_id)) + if (!has_dmc_id_fw(display, dmc_id)) continue; for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) - disable_event_handler(i915, - DMC_EVT_CTL(i915, dmc_id, handler), - DMC_EVT_HTP(i915, dmc_id, handler)); + disable_event_handler(display, + DMC_EVT_CTL(display, dmc_id, handler), + DMC_EVT_HTP(display, dmc_id, handler)); } } -static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { enum pipe pipe; @@ -451,84 +458,86 @@ static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool ena */ if (enable) for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) - intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe), 0, PIPEDMC_GATING_DIS); else for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) - intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe), PIPEDMC_GATING_DIS, 0); } -static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) +static void mtl_pipedmc_clock_gating_wa(struct intel_display *display) { /* * Wa_16015201720 * The WA requires clock gating to be disabled all the time * for pipe A and B. 
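
The workaround hunks in this file lean on the intel_de_rmw(display, reg, clear, set) convention: clear the given bits, then set the given bits, returning the old value. A self-contained model of that contract over a fake register (the bit position is hypothetical):

#include <assert.h>
#include <stdio.h>

static unsigned int fake_reg;	/* stands in for one MMIO register */

/* Same contract as intel_de_rmw(): clear bits, then set bits; return old. */
static unsigned int de_rmw(unsigned int clear, unsigned int set)
{
	unsigned int old = fake_reg;

	fake_reg = (old & ~clear) | set;
	return old;
}

#define PIPEDMC_GATING_DIS (1u << 12)	/* hypothetical bit position */

int main(void)
{
	de_rmw(0, PIPEDMC_GATING_DIS);		/* enable: (clear=0, set=bit) */
	assert(fake_reg & PIPEDMC_GATING_DIS);

	de_rmw(PIPEDMC_GATING_DIS, 0);		/* disable: (clear=bit, set=0) */
	assert(!(fake_reg & PIPEDMC_GATING_DIS));

	printf("final: %#x\n", fake_reg);
	return 0;
}
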
*/ - intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, + intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); } -static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { - if (DISPLAY_VER(i915) >= 14 && enable) - mtl_pipedmc_clock_gating_wa(i915); - else if (DISPLAY_VER(i915) == 13) - adlp_pipedmc_clock_gating_wa(i915, enable); + if (DISPLAY_VER(display) >= 14 && enable) + mtl_pipedmc_clock_gating_wa(display); + else if (DISPLAY_VER(display) == 13) + adlp_pipedmc_clock_gating_wa(display, enable); } -void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) +void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) return; - if (DISPLAY_VER(i915) >= 14) - intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); else - intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); } -void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) +void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) return; - if (DISPLAY_VER(i915) >= 14) - intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); else - intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); } -static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915, +static bool is_dmc_evt_ctl_reg(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg) { u32 offset = i915_mmio_reg_offset(reg); - u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0)); - u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); + u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)); + u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); return offset >= start && offset < end; } -static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915, +static bool is_dmc_evt_htp_reg(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg) { u32 offset = i915_mmio_reg_offset(reg); - u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0)); - u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); + u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)); + u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); return offset >= start && offset < end; } -static bool disable_dmc_evt(struct drm_i915_private *i915, +static bool disable_dmc_evt(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg, u32 data) { - if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg)) + struct drm_i915_private *i915 = to_i915(display->drm); + + if (!is_dmc_evt_ctl_reg(display, dmc_id, reg)) return false; /* keep all pipe DMC events 
disabled by default */ @@ -548,11 +557,11 @@ static bool disable_dmc_evt(struct drm_i915_private *i915, return false; } -static u32 dmc_mmiodata(struct drm_i915_private *i915, +static u32 dmc_mmiodata(struct intel_display *display, struct intel_dmc *dmc, enum intel_dmc_id dmc_id, int i) { - if (disable_dmc_evt(i915, dmc_id, + if (disable_dmc_evt(display, dmc_id, dmc->dmc_info[dmc_id].mmioaddr[i], dmc->dmc_info[dmc_id].mmiodata[i])) return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, @@ -565,25 +574,26 @@ static u32 dmc_mmiodata(struct drm_i915_private *i915, /** * intel_dmc_load_program() - write the firmware from memory to registers. - * @i915: i915 drm device. + * @display: display instance * * DMC firmware is read from a .bin file and kept in internal memory one time. * Every time the display comes back from a low power state, this function is called to * copy the firmware from internal memory to registers. */ -void intel_dmc_load_program(struct drm_i915_private *i915) +void intel_dmc_load_program(struct intel_display *display) { - struct i915_power_domains *power_domains = &i915->display.power.domains; - struct intel_dmc *dmc = i915_to_dmc(i915); + struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm); + struct i915_power_domains *power_domains = &display->power.domains; + struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; u32 i; - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(i915, true); + pipedmc_clock_gating_wa(display, true); - disable_all_event_handlers(i915); + disable_all_event_handlers(display); assert_rpm_wakelock_held(&i915->runtime_pm); @@ -591,7 +601,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915) for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { - intel_de_write_fw(i915, + intel_de_write_fw(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), dmc->dmc_info[dmc_id].payload[i]); } @@ -601,48 +611,48 @@ void intel_dmc_load_program(struct drm_i915_private *i915) for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { - intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], - dmc_mmiodata(i915, dmc, dmc_id, i)); + intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc_mmiodata(display, dmc, dmc_id, i)); } } power_domains->dc_state = 0; - gen9_set_dc_state_debugmask(i915); + gen9_set_dc_state_debugmask(display); - pipedmc_clock_gating_wa(i915, false); + pipedmc_clock_gating_wa(display, false); } /** * intel_dmc_disable_program() - disable the firmware - * @i915: i915 drm device + * @display: display instance * * Disable all event handlers in the firmware, making sure the firmware is * inactive after the display is uninitialized. 
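
intel_dmc_load_program() above makes two passes per firmware id: first the program payload is copied word by word into the DMC_PROGRAM() window with intel_de_write_fw(), then the captured (address, data) pairs program the event-handler registers. A reduced user-space model of those two passes, with made-up sizes and addresses:

#include <stdio.h>

#define FW_WORDS 4
#define MMIO_PAIRS 2

/*
 * Reduced model of one dmc_fw_info slot: a payload blob plus the
 * (address, data) pairs captured while parsing the firmware.
 */
struct dmc_info {
	unsigned int start_mmioaddr;
	unsigned int payload[FW_WORDS];
	unsigned int mmioaddr[MMIO_PAIRS];
	unsigned int mmiodata[MMIO_PAIRS];
};

static void de_write(unsigned int reg, unsigned int val)
{
	printf("write %#010x = %#010x\n", reg, val);
}

static void load_program(const struct dmc_info *info)
{
	unsigned int i;

	/* Pass 1: copy the program words into the DMC program window. */
	for (i = 0; i < FW_WORDS; i++)
		de_write(info->start_mmioaddr + 4 * i, info->payload[i]);

	/* Pass 2: program the event-handler registers from the pairs. */
	for (i = 0; i < MMIO_PAIRS; i++)
		de_write(info->mmioaddr[i], info->mmiodata[i]);
}

int main(void)
{
	struct dmc_info info = {
		.start_mmioaddr = 0x8f000,	/* hypothetical */
		.payload = { 1, 2, 3, 4 },
		.mmioaddr = { 0x8f100, 0x8f104 },
		.mmiodata = { 0x5, 0x6 },
	};

	load_program(&info);
	return 0;
}
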
*/ -void intel_dmc_disable_program(struct drm_i915_private *i915) +void intel_dmc_disable_program(struct intel_display *display) { - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(i915, true); - disable_all_event_handlers(i915); - pipedmc_clock_gating_wa(i915, false); + pipedmc_clock_gating_wa(display, true); + disable_all_event_handlers(display); + pipedmc_clock_gating_wa(display, false); - intel_dmc_wl_disable(&i915->display); + intel_dmc_wl_disable(display); } -void assert_dmc_loaded(struct drm_i915_private *i915) +void assert_dmc_loaded(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); - drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); - drm_WARN_ONCE(&i915->drm, dmc && - !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n"); + drm_WARN_ONCE(display->drm, dmc && + !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); - drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), + drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); - drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL), + drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL), "DMC HTP Not fine\n"); } @@ -673,7 +683,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, const struct stepping_info *si, u8 package_ver) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; enum intel_dmc_id dmc_id; unsigned int i; @@ -681,7 +691,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, dmc_id = package_ver <= 1 ? 
DMC_FW_MAIN : fw_info[i].dmc_id; if (!is_valid_dmc_id(dmc_id)) { - drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); + drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id); continue; } @@ -703,7 +713,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, int header_ver, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; u32 start_range, end_range; int i; @@ -713,14 +723,14 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, } else if (dmc_id == DMC_FW_MAIN) { start_range = TGL_MAIN_MMIO_START; end_range = TGL_MAIN_MMIO_END; - } else if (DISPLAY_VER(i915) >= 13) { + } else if (DISPLAY_VER(display) >= 13) { start_range = ADLP_PIPE_MMIO_START; end_range = ADLP_PIPE_MMIO_END; - } else if (DISPLAY_VER(i915) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { start_range = TGL_PIPE_MMIO_START(dmc_id); end_range = TGL_PIPE_MMIO_END(dmc_id); } else { - drm_warn(&i915->drm, "Unknown mmio range for sanity check"); + drm_warn(display->drm, "Unknown mmio range for sanity check"); return false; } @@ -736,7 +746,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -784,39 +794,39 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, start_mmioaddr = DMC_V1_MMIO_START_RANGE; dmc_header_size = sizeof(*v1); } else { - drm_err(&i915->drm, "Unknown DMC fw header version: %u\n", + drm_err(display->drm, "Unknown DMC fw header version: %u\n", dmc_header->header_ver); return 0; } if (header_len_bytes != dmc_header_size) { - drm_err(&i915->drm, "DMC firmware has wrong dmc header length " + drm_err(display->drm, "DMC firmware has wrong dmc header length " "(%u bytes)\n", header_len_bytes); return 0; } /* Cache the dmc header info. */ if (mmio_count > mmio_count_max) { - drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); + drm_err(display->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); return 0; } if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count, dmc_header->header_ver, dmc_id)) { - drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n"); + drm_err(display->drm, "DMC firmware has Wrong MMIO Addresses\n"); return 0; } - drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id); + drm_dbg_kms(display->drm, "DMC %d:\n", dmc_id); for (i = 0; i < mmio_count; i++) { dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmiodata[i] = mmiodata[i]; - drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", + drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", i, mmioaddr[i], mmiodata[i], - is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : - is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", - disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i], + is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : + is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", + disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], dmc_info->mmiodata[i]) ? 
" (disabling)" : ""); } dmc_info->mmio_count = mmio_count; @@ -830,7 +840,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, goto error_truncated; if (payload_size > dmc->max_fw_size) { - drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size); + drm_err(display->drm, "DMC FW too big (%u bytes)\n", payload_size); return 0; } dmc_info->dmc_fw_size = dmc_header->fw_size; @@ -845,7 +855,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, return header_len_bytes + payload_size; error_truncated: - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } @@ -855,7 +865,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -868,7 +878,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, } else if (package_header->header_ver == 2) { max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES; } else { - drm_err(&i915->drm, "DMC firmware has unknown header version %u\n", + drm_err(display->drm, "DMC firmware has unknown header version %u\n", package_header->header_ver); return 0; } @@ -882,7 +892,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, goto error_truncated; if (package_header->header_len * 4 != package_size) { - drm_err(&i915->drm, "DMC firmware has wrong package header length " + drm_err(display->drm, "DMC firmware has wrong package header length " "(%u bytes)\n", package_size); return 0; } @@ -900,7 +910,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, return package_size; error_truncated: - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } @@ -909,16 +919,16 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; if (rem_size < sizeof(struct intel_css_header)) { - drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); + drm_err(display->drm, "Truncated DMC firmware, refusing.\n"); return 0; } if (sizeof(struct intel_css_header) != (css_header->header_len * 4)) { - drm_err(&i915->drm, "DMC firmware has wrong CSS header length " + drm_err(display->drm, "DMC firmware has wrong CSS header length " "(%u bytes)\n", (css_header->header_len * 4)); return 0; @@ -931,12 +941,12 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; struct stepping_info display_info = { '*', '*'}; - const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); + const struct stepping_info *si = intel_get_stepping_info(display, &display_info); enum intel_dmc_id dmc_id; u32 readcount = 0; u32 r, offset; @@ -966,7 +976,7 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; if (offset > fw->size) { - drm_err(&i915->drm, "Reading beyond the fw_size\n"); + drm_err(display->drm, "Reading beyond the fw_size\n"); continue; } @@ -974,30 +984,35 @@ static int 
parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } - if (!intel_dmc_has_payload(i915)) { - drm_err(&i915->drm, "DMC firmware main program not found\n"); + if (!intel_dmc_has_payload(display)) { + drm_err(display->drm, "DMC firmware main program not found\n"); return -ENOENT; } return 0; } -static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) +static void intel_dmc_runtime_pm_get(struct intel_display *display) { - drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); - i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); + struct drm_i915_private *i915 = to_i915(display->drm); + + drm_WARN_ON(display->drm, display->dmc.wakeref); + display->dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } -static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) +static void intel_dmc_runtime_pm_put(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->display.dmc.wakeref); + fetch_and_zero(&display->dmc.wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); } -static const char *dmc_fallback_path(struct drm_i915_private *i915) +static const char *dmc_fallback_path(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + if (IS_ALDERLAKE_P(i915)) return ADLP_DMC_FALLBACK_PATH; @@ -1007,45 +1022,45 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915) static void dmc_load_work_fn(struct work_struct *work) { struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); - struct drm_i915_private *i915 = dmc->i915; + struct intel_display *display = dmc->display; const struct firmware *fw = NULL; const char *fallback_path; int err; - err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); + err = request_firmware(&fw, dmc->fw_path, display->drm->dev); - if (err == -ENOENT && !dmc_firmware_param(i915)) { - fallback_path = dmc_fallback_path(i915); + if (err == -ENOENT && !dmc_firmware_param(display)) { + fallback_path = dmc_fallback_path(display); if (fallback_path) { - drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", + drm_dbg_kms(display->drm, "%s not found, falling back to %s\n", dmc->fw_path, fallback_path); - err = request_firmware(&fw, fallback_path, i915->drm.dev); + err = request_firmware(&fw, fallback_path, display->drm->dev); if (err == 0) dmc->fw_path = fallback_path; } } if (err) { - drm_notice(&i915->drm, + drm_notice(display->drm, "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n", dmc->fw_path, ERR_PTR(err)); - drm_notice(&i915->drm, "DMC firmware homepage: %s", + drm_notice(display->drm, "DMC firmware homepage: %s", INTEL_DMC_FIRMWARE_URL); return; } err = parse_dmc_fw(dmc, fw); if (err) { - drm_notice(&i915->drm, + drm_notice(display->drm, "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n", dmc->fw_path, ERR_PTR(err)); goto out; } - intel_dmc_load_program(i915); - intel_dmc_runtime_pm_put(i915); + intel_dmc_load_program(display); + intel_dmc_runtime_pm_put(display); - drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", + drm_info(display->drm, "Finished loading DMC firmware %s (v%u.%u)\n", dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); @@ -1055,16 +1070,17 @@ out: /** * intel_dmc_init() - initialize the firmware loading. - * @i915: i915 drm device. 
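The intel_dmc.c hunks here all apply one mechanical conversion: functions that took a struct drm_i915_private now take a struct intel_display, and the i915 pointer is re-derived via to_i915(display->drm) only on the few paths (power domains, the unordered workqueue) that still need the device-wide structure. A minimal standalone sketch of that idiom follows; the stub types and stub_to_i915() are simplified stand-ins, not the kernel's real definitions (the kernel's to_i915() is container_of() based):

	#include <stdio.h>

	/* Simplified stand-ins for the kernel types; illustrative only. */
	struct drm_device { void *dev_private; };
	struct intel_display { struct drm_device *drm; };
	struct drm_i915_private { struct intel_display *display; };

	/* Hypothetical helper modelling to_i915(display->drm). */
	static struct drm_i915_private *stub_to_i915(struct drm_device *drm)
	{
		return drm->dev_private;
	}

	/* Display-level code carries only the intel_display pointer and
	 * derives the device-wide i915 pointer just for the calls that
	 * still need it, mirroring intel_dmc_runtime_pm_get() above. */
	static void dmc_runtime_pm_get_sketch(struct intel_display *display)
	{
		struct drm_i915_private *i915 = stub_to_i915(display->drm);

		printf("power domain taken via i915 %p for display %p\n",
		       (void *)i915, (void *)display);
	}

	int main(void)
	{
		static struct drm_i915_private i915;
		static struct drm_device drm;
		static struct intel_display display;

		drm.dev_private = &i915;
		display.drm = &drm;
		i915.display = &display;

		dmc_runtime_pm_get_sketch(&display);
		return 0;
	}

Keeping the derivation at the top of each converted function makes the remaining drm_i915_private dependencies easy to grep for and peel away in later patches.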
+ * @display: display instance * * This function is called at the time of loading the display driver to read * firmware from a .bin file and copy it into internal memory. */ -void intel_dmc_init(struct drm_i915_private *i915) +void intel_dmc_init(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; /* @@ -1075,35 +1091,35 @@ void intel_dmc_init(struct drm_i915_private *i915) * suspend as runtime suspend *requires* a working DMC for whatever * reason. */ - intel_dmc_runtime_pm_get(i915); + intel_dmc_runtime_pm_get(display); dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); if (!dmc) return; - dmc->i915 = i915; + dmc->display = display; INIT_WORK(&dmc->work, dmc_load_work_fn); - dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size); + dmc->fw_path = dmc_firmware_default(display, &dmc->max_fw_size); - if (dmc_firmware_param_disabled(i915)) { - drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); + if (dmc_firmware_param_disabled(display)) { + drm_info(display->drm, "Disabling DMC firmware and runtime PM\n"); goto out; } - if (dmc_firmware_param(i915)) - dmc->fw_path = dmc_firmware_param(i915); + if (dmc_firmware_param(display)) + dmc->fw_path = dmc_firmware_param(display); if (!dmc->fw_path) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "No known DMC firmware for platform, disabling runtime PM\n"); goto out; } - i915->display.dmc.dmc = dmc; + display->dmc.dmc = dmc; - drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); + drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path); queue_work(i915->unordered_wq, &dmc->work); return; @@ -1114,129 +1130,152 @@ out: /** * intel_dmc_suspend() - prepare DMC firmware before system suspend - * @i915: i915 drm device + * @display: display instance * * Prepare the DMC firmware before entering system suspend. This includes * flushing pending work items and releasing any resources acquired during * init. */ -void intel_dmc_suspend(struct drm_i915_private *i915) +void intel_dmc_suspend(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; if (dmc) flush_work(&dmc->work); - intel_dmc_wl_disable(&i915->display); + intel_dmc_wl_disable(display); /* Drop the reference held in case DMC isn't loaded. */ - if (!intel_dmc_has_payload(i915)) - intel_dmc_runtime_pm_put(i915); + if (!intel_dmc_has_payload(display)) + intel_dmc_runtime_pm_put(display); } /** * intel_dmc_resume() - init DMC firmware during system resume - * @i915: i915 drm device + * @display: display instance * * Reinitialize the DMC firmware during system resume, reacquiring any * resources released in intel_dmc_suspend(). */ -void intel_dmc_resume(struct drm_i915_private *i915) +void intel_dmc_resume(struct intel_display *display) { - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; /* * Reacquire the reference to keep RPM disabled in case DMC isn't * loaded. */ - if (!intel_dmc_has_payload(i915)) - intel_dmc_runtime_pm_get(i915); + if (!intel_dmc_has_payload(display)) + intel_dmc_runtime_pm_get(display); } /** * intel_dmc_fini() - unload the DMC firmware. - * @i915: i915 drm device. + * @display: display instance * * Firmware unloading includes freeing the internal memory and resetting the * firmware loading status.
*/ -void intel_dmc_fini(struct drm_i915_private *i915) +void intel_dmc_fini(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return; - intel_dmc_suspend(i915); - drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); + intel_dmc_suspend(display); + drm_WARN_ON(display->drm, display->dmc.wakeref); if (dmc) { for_each_dmc_id(dmc_id) kfree(dmc->dmc_info[dmc_id].payload); kfree(dmc); - i915->display.dmc.dmc = NULL; + display->dmc.dmc = NULL; } } -void intel_dmc_print_error_state(struct drm_printer *p, - struct drm_i915_private *i915) +struct intel_dmc_snapshot { + bool initialized; + bool loaded; + u32 version; +}; + +struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display) { - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_dmc *dmc = display_to_dmc(display); + struct intel_dmc_snapshot *snapshot; - if (!HAS_DMC(i915)) - return; + if (!HAS_DMC(display)) + return NULL; + + snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC); + if (!snapshot) + return NULL; - drm_printf(p, "DMC initialized: %s\n", str_yes_no(dmc)); - drm_printf(p, "DMC loaded: %s\n", - str_yes_no(intel_dmc_has_payload(i915))); + snapshot->initialized = dmc; + snapshot->loaded = intel_dmc_has_payload(display); if (dmc) + snapshot->version = dmc->version; + + return snapshot; +} + +void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p) +{ + if (!snapshot) + return; + + drm_printf(p, "DMC initialized: %s\n", str_yes_no(snapshot->initialized)); + drm_printf(p, "DMC loaded: %s\n", str_yes_no(snapshot->loaded)); + if (snapshot->initialized) drm_printf(p, "DMC fw version: %d.%d\n", - DMC_VERSION_MAJOR(dmc->version), - DMC_VERSION_MINOR(dmc->version)); + DMC_VERSION_MAJOR(snapshot->version), + DMC_VERSION_MINOR(snapshot->version)); } static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; - struct intel_dmc *dmc = i915_to_dmc(i915); + struct intel_display *display = m->private; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_dmc *dmc = display_to_dmc(display); intel_wakeref_t wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; - if (!HAS_DMC(i915)) + if (!HAS_DMC(display)) return -ENODEV; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", - str_yes_no(intel_dmc_has_payload(i915))); + str_yes_no(intel_dmc_has_payload(display))); seq_printf(m, "path: %s\n", dmc ? 
dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", - str_yes_no(DISPLAY_VER(i915) >= 12)); + str_yes_no(DISPLAY_VER(display) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", - str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); + str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", str_yes_no(IS_ALDERLAKE_P(i915) || - DISPLAY_VER(i915) >= 14)); + DISPLAY_VER(display) >= 14)); seq_printf(m, "Pipe B fw loaded: %s\n", - str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); + str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB))); - if (!intel_dmc_has_payload(i915)) + if (!intel_dmc_has_payload(display)) goto out; seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { i915_reg_t dc3co_reg; - if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) { + if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) { dc3co_reg = DG1_DMC_DEBUG3; dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; } else { @@ -1246,7 +1285,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) } seq_printf(m, "DC3CO count: %d\n", - intel_de_read(i915, dc3co_reg)); + intel_de_read(display, dc3co_reg)); } else { dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT : SKL_DMC_DC3_DC5_COUNT; @@ -1254,18 +1293,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) dc6_reg = SKL_DMC_DC5_DC6_COUNT; } - seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg)); + seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg)); if (i915_mmio_reg_valid(dc6_reg)) seq_printf(m, "DC5 -> DC6 count: %d\n", - intel_de_read(i915, dc6_reg)); + intel_de_read(display, dc6_reg)); seq_printf(m, "program base: 0x%08x\n", - intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); + intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); out: seq_printf(m, "ssp base: 0x%08x\n", - intel_de_read(i915, DMC_SSP_BASE)); - seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); + intel_de_read(display, DMC_SSP_BASE)); + seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL)); intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -1274,10 +1313,10 @@ out: DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status); -void intel_dmc_debugfs_register(struct drm_i915_private *i915) +void intel_dmc_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, - i915, &intel_dmc_debugfs_status_fops); + display, &intel_dmc_debugfs_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 54cff6002e31..44cecef98e73 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -9,22 +9,24 @@ #include <linux/types.h> enum pipe; -struct drm_i915_private; struct drm_printer; +struct intel_display; +struct intel_dmc_snapshot; -void intel_dmc_init(struct drm_i915_private *i915); -void intel_dmc_load_program(struct drm_i915_private *i915); -void intel_dmc_disable_program(struct drm_i915_private *i915); -void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_fini(struct drm_i915_private *i915); -void intel_dmc_suspend(struct drm_i915_private *i915); -void 
intel_dmc_resume(struct drm_i915_private *i915); -bool intel_dmc_has_payload(struct drm_i915_private *i915); -void intel_dmc_debugfs_register(struct drm_i915_private *i915); -void intel_dmc_print_error_state(struct drm_printer *p, - struct drm_i915_private *i915); +void intel_dmc_init(struct intel_display *display); +void intel_dmc_load_program(struct intel_display *display); +void intel_dmc_disable_program(struct intel_display *display); +void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe); +void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe); +void intel_dmc_fini(struct intel_display *display); +void intel_dmc_suspend(struct intel_display *display); +void intel_dmc_resume(struct intel_display *display); +bool intel_dmc_has_payload(struct intel_display *display); +void intel_dmc_debugfs_register(struct intel_display *display); -void assert_dmc_loaded(struct drm_i915_private *i915); +struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display); +void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p); + +void assert_dmc_loaded(struct intel_display *display); #endif /* __INTEL_DMC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c index d9864b9cc429..5634ff07269d 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c @@ -109,10 +109,8 @@ static bool intel_dmc_wl_check_range(u32 address) static bool __intel_dmc_wl_supported(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) < 20 || - !intel_dmc_has_payload(i915) || + !intel_dmc_has_payload(display) || !display->params.enable_dmc_wl) return false; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 90fa73575feb..ff5ba7b3035f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -67,6 +67,7 @@ #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" @@ -82,8 +83,10 @@ #include "intel_modeset_lock.h" #include "intel_panel.h" #include "intel_pch_display.h" +#include "intel_pfit.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_runtime_pm.h" #include "intel_quirks.h" #include "intel_tc.h" #include "intel_vdsc.h" @@ -103,13 +106,6 @@ /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 -/* Compliance test status bits */ -#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 -#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) -#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) -#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) - - /* Constants for DP DSC configurations */ static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; @@ -501,7 +497,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp) if (intel_encoder_is_c10phy(encoder)) return 810000; - if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1)) + if (DISPLAY_VERx100(to_i915(encoder->base.dev)) == 1401) return 1350000; return 2000000; @@ -770,8 +766,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) intel_dp_link_config_init(intel_dp); } -static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int 
link_rate, - u8 lane_count) +bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, + u8 lane_count) { /* * FIXME: we need to synchronize the current link parameters with @@ -865,36 +861,74 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p return bits_per_pixel; } -static -u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, - u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner) +static int bigjoiner_interface_bits(struct intel_display *display) +{ + return DISPLAY_VER(display) >= 14 ? 36 : 24; +} + +static u32 bigjoiner_bw_max_bpp(struct intel_display *display, u32 mode_clock, + int num_joined_pipes) +{ + u32 max_bpp; + /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ + int ppc = 2; + int num_big_joiners = num_joined_pipes / 2; + + max_bpp = display->cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits(display) / + intel_dp_mode_to_fec_clock(mode_clock); + + max_bpp *= num_big_joiners; + + return max_bpp; + +} + +static u32 small_joiner_ram_max_bpp(struct intel_display *display, + u32 mode_hdisplay, + int num_joined_pipes) { - u32 max_bpp_small_joiner_ram; + struct drm_i915_private *i915 = to_i915(display->drm); + u32 max_bpp; /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ - max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; + max_bpp = small_joiner_ram_size_bits(i915) / mode_hdisplay; - if (bigjoiner) { - int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; - /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ - int ppc = 2; - u32 max_bpp_bigjoiner = - i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits / - intel_dp_mode_to_fec_clock(mode_clock); + max_bpp *= num_joined_pipes; - max_bpp_small_joiner_ram *= 2; + return max_bpp; +} - return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner); - } +static int ultrajoiner_ram_bits(void) +{ + return 4 * 72 * 512; +} + +static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay) +{ + return ultrajoiner_ram_bits() / mode_hdisplay; +} + +static +u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, + u32 mode_clock, u32 mode_hdisplay, + int num_joined_pipes) +{ + struct intel_display *display = to_intel_display(&i915->drm); + u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes); + + if (num_joined_pipes > 1) + max_bpp = min(max_bpp, bigjoiner_bw_max_bpp(display, mode_clock, + num_joined_pipes)); + if (num_joined_pipes == 4) + max_bpp = min(max_bpp, ultrajoiner_ram_max_bpp(mode_hdisplay)); - return max_bpp_small_joiner_ram; + return max_bpp; } u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner, + int num_joined_pipes, enum intel_output_format output_format, u32 pipe_bpp, u32 timeslots) @@ -940,7 +974,7 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, intel_dp_mode_to_fec_clock(mode_clock)); joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock, - mode_hdisplay, bigjoiner); + mode_hdisplay, num_joined_pipes); bits_per_pixel = min(bits_per_pixel, joiner_max_bpp); bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp); @@ -950,7 +984,7 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, - bool bigjoiner) + int 
num_joined_pipes) { struct drm_i915_private *i915 = to_i915(connector->base.dev); u8 min_slice_count, i; @@ -984,14 +1018,18 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, /* Find the closest match to the valid slice count values */ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { - u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; + u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes; if (test_slice_count > drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false)) break; - /* big joiner needs small joiner to be enabled */ - if (bigjoiner && test_slice_count < 4) + /* + * Bigjoiner needs small joiner to be enabled. + * So there should be at least 2 dsc slices per pipe, + * whenever bigjoiner is enabled. + */ + if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2) continue; if (min_slice_count <= test_slice_count) @@ -1270,17 +1308,45 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, return MODE_OK; } -bool intel_dp_need_joiner(struct intel_dp *intel_dp, - struct intel_connector *connector, - int hdisplay, int clock) +static +bool intel_dp_needs_joiner(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock, + int num_joined_pipes) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int hdisplay_limit; if (!intel_dp_has_joiner(intel_dp)) return false; - return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 || - connector->force_bigjoiner_enable; + num_joined_pipes /= 2; + + hdisplay_limit = DISPLAY_VER(i915) >= 30 ? 6144 : 5120; + + return clock > num_joined_pipes * i915->display.cdclk.max_dotclk_freq || + hdisplay > num_joined_pipes * hdisplay_limit; +} + +int intel_dp_num_joined_pipes(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (connector->force_joined_pipes) + return connector->force_joined_pipes; + + if (HAS_ULTRAJOINER(i915) && + intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4)) + return 4; + + if ((HAS_BIGJOINER(i915) || HAS_UNCOMPRESSED_JOINER(i915)) && + intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2)) + return 2; + + return 1; } bool intel_dp_has_dsc(const struct intel_connector *connector) @@ -1317,7 +1383,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, u16 dsc_max_compressed_bpp = 0; u8 dsc_slice_count = 0; enum drm_mode_status status; - bool dsc = false, joiner = false; + bool dsc = false; + int num_joined_pipes; status = intel_cpu_transcoder_mode_valid(dev_priv, mode); if (status != MODE_OK) @@ -1338,11 +1405,10 @@ intel_dp_mode_valid(struct drm_connector *_connector, target_clock = fixed_mode->clock; } - if (intel_dp_need_joiner(intel_dp, connector, - mode->hdisplay, target_clock)) { - joiner = true; - max_dotclk *= 2; - } + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + mode->hdisplay, target_clock); + max_dotclk *= num_joined_pipes; + if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -1386,20 +1452,20 @@ intel_dp_mode_valid(struct drm_connector *_connector, max_lanes, target_clock, mode->hdisplay, - joiner, + num_joined_pipes, output_format, pipe_bpp, 64); dsc_slice_count = intel_dp_dsc_get_slice_count(connector, target_clock, mode->hdisplay, - joiner); + num_joined_pipes); } dsc = dsc_max_compressed_bpp && dsc_slice_count; } - if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) + if 
(intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) return MODE_CLOCK_HIGH; if (mode_rate > max_rate && !dsc) @@ -1409,7 +1475,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, if (status != MODE_OK) return status; - return intel_mode_valid_max_plane_size(dev_priv, mode, joiner); + return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes); } bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) @@ -1632,45 +1698,6 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp, return bpp; } -/* Adjust link config limits based on compliance test requests. */ -void -intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct link_config_limits *limits) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - /* For DP Compliance we override the computed bpp for the pipe */ - if (intel_dp->compliance.test_data.bpc != 0) { - int bpp = 3 * intel_dp->compliance.test_data.bpc; - - limits->pipe.min_bpp = limits->pipe.max_bpp = bpp; - pipe_config->dither_force_disable = bpp == 6 * 3; - - drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); - } - - /* Use values requested by Compliance Test Request */ - if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { - int index; - - /* Validate the compliance test data since max values - * might have changed due to link train fallback. - */ - if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, - intel_dp->compliance.test_lane_count)) { - index = intel_dp_rate_index(intel_dp->common_rates, - intel_dp->num_common_rates, - intel_dp->compliance.test_link_rate); - if (index >= 0) - limits->min_rate = limits->max_rate = - intel_dp->compliance.test_link_rate; - limits->min_lane_count = limits->max_lane_count = - intel_dp->compliance.test_lane_count; - } - } -} - static bool has_seamless_m_n(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -2109,6 +2136,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; int dsc_joiner_max_bpp; + int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config); dsc_src_min_bpp = dsc_src_min_compressed_bpp(); dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); @@ -2123,7 +2151,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock, adjusted_mode->hdisplay, - pipe_config->joiner_pipes); + num_joined_pipes); dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16)); @@ -2308,11 +2336,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config); int ret; + /* + * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional. + * Since FEC is a bandwidth overhead, continue to not enable it for + * eDP until there is a good reason to do so.
+ */ pipe_config->fec_enable = pipe_config->fec_enable || (!intel_dp_is_edp(intel_dp) && - intel_dp_supports_fec(intel_dp, connector, pipe_config)); + intel_dp_supports_fec(intel_dp, connector, pipe_config) && + !intel_dp_is_uhbr(pipe_config)); if (!intel_dp_supports_dsc(connector, pipe_config)) return -EINVAL; @@ -2357,7 +2392,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, intel_dp_dsc_get_slice_count(connector, adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay, - pipe_config->joiner_pipes); + num_joined_pipes); if (!dsc_dp_slice_count) { drm_dbg_kms(&dev_priv->drm, "Compressed Slice Count not supported\n"); @@ -2445,7 +2480,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, encoder->base.base.id, encoder->base.name, crtc->base.base.id, crtc->base.name, adjusted_mode->crtc_clock, - dsc ? "on" : "off", + str_on_off(dsc), limits->max_lane_count, limits->max_rate, limits->pipe.max_bpp, @@ -2488,7 +2523,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_rate = limits->max_rate; } - intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); + intel_dp_test_compute_config(intel_dp, crtc_state, limits); return intel_dp_compute_config_link_bpp_limits(intel_dp, crtc_state, @@ -2507,14 +2542,17 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); } -bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner) +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, + int num_joined_pipes) { /* * Pipe joiner needs compression up to display 12 due to bandwidth * limitation. DG2 onwards pipe joiner can be enabled without * compression. + * Ultrajoiner always needs compression. 
*/ - return DISPLAY_VER(i915) < 13 && use_joiner; + return (!HAS_UNCOMPRESSED_JOINER(i915) && num_joined_pipes == 2) || + num_joined_pipes == 4; } static int @@ -2532,18 +2570,20 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; bool dsc_needed, joiner_needs_dsc; + int num_joined_pipes; int ret = 0; if (pipe_config->fec_enable && !intel_dp_supports_fec(intel_dp, connector, pipe_config)) return -EINVAL; - if (intel_dp_need_joiner(intel_dp, connector, - adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_clock)) - pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock); + if (num_joined_pipes > 1) + pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe); - joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes); + joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, num_joined_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_compute_config_limits(intel_dp, pipe_config, @@ -2742,7 +2782,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC; as_sdp->length = 0x9; as_sdp->duration_incr_ms = 0; - as_sdp->duration_incr_ms = 0; if (crtc_state->cmrr.enable) { as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED; @@ -3365,30 +3404,43 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, } static void -intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) +intel_dp_init_source_oui(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 oui[] = { 0x00, 0xaa, 0x01 }; u8 buf[3] = {}; + if (READ_ONCE(intel_dp->oui_valid)) + return; + + WRITE_ONCE(intel_dp->oui_valid, true); + /* * During driver init, we want to be careful and avoid changing the source OUI if it's * already set to what we want, so as to avoid clearing any state by accident */ - if (careful) { - if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) - drm_err(&i915->drm, "Failed to read source OUI\n"); + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) + drm_err(&i915->drm, "Failed to read source OUI\n"); - if (memcmp(oui, buf, sizeof(oui)) == 0) - return; + if (memcmp(oui, buf, sizeof(oui)) == 0) { + /* Assume the OUI was written now. 
*/ + intel_dp->last_oui_write = jiffies; + return; } - if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) - drm_err(&i915->drm, "Failed to write source OUI\n"); + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) { + drm_info(&i915->drm, "Failed to write source OUI\n"); + WRITE_ONCE(intel_dp->oui_valid, false); + } intel_dp->last_oui_write = jiffies; } +void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp) +{ + WRITE_ONCE(intel_dp->oui_valid, false); +} + void intel_dp_wait_source_oui(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -3424,8 +3476,7 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) lspcon_resume(dp_to_dig_port(intel_dp)); /* Write the source OUI as early as possible */ - if (intel_dp_is_edp(intel_dp)) - intel_edp_init_source_oui(intel_dp, false); + intel_dp_init_source_oui(intel_dp); /* * When turning on, we need to retry for 1ms to give the sink @@ -3900,7 +3951,7 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, str_enable_disable(tmp)); } -bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) +static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { u8 dprx = 0; @@ -3963,6 +4014,23 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector * intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd); } +static void +intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ + if (!HAS_DSC(i915)) + return; + + if (intel_dp_is_edp(intel_dp)) + intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], + connector); + else + intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], + connector); +} + static void intel_edp_mso_mode_fixup(struct intel_connector *connector, struct drm_display_mode *mode) { @@ -4051,6 +4119,45 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; } +static void +intel_edp_set_sink_rates(struct intel_dp *intel_dp) +{ + intel_dp->num_sink_rates = 0; + + if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + int i; + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + + for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { + int val = le16_to_cpu(sink_rates[i]); + + if (val == 0) + break; + + /* Value read multiplied by 200kHz gives the per-lane + * link rate in kHz. The source rates are, however, + * stored in terms of LS_Clk kHz. The full conversion + * back to symbols is + * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) + */ + intel_dp->sink_rates[i] = (val * 200) / 10; + } + intel_dp->num_sink_rates = i; + } + + /* + * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, + * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
+ */ + if (intel_dp->num_sink_rates) + intel_dp->use_rate_select = true; + else + intel_dp_set_sink_rates(intel_dp); +} + static bool intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) { @@ -4090,59 +4197,22 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector } /* + * If needed, program our source OUI so we can make various Intel-specific AUX services + * available (such as HDR backlight controls) + */ + intel_dp_init_source_oui(intel_dp); + + /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] */ intel_psr_init_dpcd(intel_dp); - /* Clear the default sink rates */ - intel_dp->num_sink_rates = 0; - - /* Read the eDP 1.4+ supported link rates. */ - if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { - __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; - int i; - - drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, - sink_rates, sizeof(sink_rates)); - - for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { - int val = le16_to_cpu(sink_rates[i]); - - if (val == 0) - break; - - /* Value read multiplied by 200kHz gives the per-lane - * link rate in kHz. The source rates are, however, - * stored in terms of LS_Clk kHz. The full conversion - * back to symbols is - * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) - */ - intel_dp->sink_rates[i] = (val * 200) / 10; - } - intel_dp->num_sink_rates = i; - } - - /* - * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, - * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. - */ - if (intel_dp->num_sink_rates) - intel_dp->use_rate_select = true; - else - intel_dp_set_sink_rates(intel_dp); + intel_edp_set_sink_rates(intel_dp); intel_dp_set_max_sink_lane_count(intel_dp); /* Read the eDP DSC DPCD registers */ - if (HAS_DSC(dev_priv)) - intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], - connector); - - /* - * If needed, program our source OUI so we can make various Intel-specific AUX services - * available (such as HDR backlight controls) - */ - intel_edp_init_source_oui(intel_dp, true); + intel_dp_detect_dsc_caps(intel_dp, connector); return true; } @@ -4771,328 +4841,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, } } -static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - int status = 0; - int test_link_rate; - u8 test_lane_count, test_link_bw; - /* (DP CTS 1.2) - * 4.3.1.11 - */ - /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, - &test_lane_count); - - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Lane count read failed\n"); - return DP_TEST_NAK; - } - test_lane_count &= DP_MAX_LANE_COUNT_MASK; - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, - &test_link_bw); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); - return DP_TEST_NAK; - } - test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); - - /* Validate the requested link rate and lane count */ - if (!intel_dp_link_params_valid(intel_dp, test_link_rate, - test_lane_count)) - return DP_TEST_NAK; - - intel_dp->compliance.test_lane_count = test_lane_count; - intel_dp->compliance.test_link_rate = test_link_rate; - - return DP_TEST_ACK; -} - -static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 test_pattern; - u8 test_misc; - __be16 h_width, v_height; - int status = 
0; - - /* Read the TEST_PATTERN (DP CTS 3.1.5) */ - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, - &test_pattern); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); - return DP_TEST_NAK; - } - if (test_pattern != DP_COLOR_RAMP) - return DP_TEST_NAK; - - status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, - &h_width, 2); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "H Width read failed\n"); - return DP_TEST_NAK; - } - - status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, - &v_height, 2); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "V Height read failed\n"); - return DP_TEST_NAK; - } - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, - &test_misc); - if (status <= 0) { - drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); - return DP_TEST_NAK; - } - if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) - return DP_TEST_NAK; - if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) - return DP_TEST_NAK; - switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { - case DP_TEST_BIT_DEPTH_6: - intel_dp->compliance.test_data.bpc = 6; - break; - case DP_TEST_BIT_DEPTH_8: - intel_dp->compliance.test_data.bpc = 8; - break; - default: - return DP_TEST_NAK; - } - - intel_dp->compliance.test_data.video_pattern = test_pattern; - intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); - intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return DP_TEST_ACK; -} - -static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 test_result = DP_TEST_ACK; - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_connector *connector = &intel_connector->base; - - if (intel_connector->detect_edid == NULL || - connector->edid_corrupt || - intel_dp->aux.i2c_defer_count > 6) { - /* Check EDID read for NACKs, DEFERs and corruption - * (DP CTS 1.2 Core r1.1) - * 4.2.2.4 : Failed EDID read, I2C_NAK - * 4.2.2.5 : Failed EDID read, I2C_DEFER - * 4.2.2.6 : EDID corruption detected - * Use failsafe mode for all cases - */ - if (intel_dp->aux.i2c_nack_count > 0 || - intel_dp->aux.i2c_defer_count > 0) - drm_dbg_kms(&i915->drm, - "EDID read had %d NACKs, %d DEFERs\n", - intel_dp->aux.i2c_nack_count, - intel_dp->aux.i2c_defer_count); - intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; - } else { - /* FIXME: Get rid of drm_edid_raw() */ - const struct edid *block = drm_edid_raw(intel_connector->detect_edid); - - /* We have to write the checksum of the last block read */ - block += block->extensions; - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, - block->checksum) <= 0) - drm_dbg_kms(&i915->drm, - "Failed to write EDID checksum\n"); - - test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; - intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; - } - - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return test_result; -} - -static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = - to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_encoder *encoder = 
&dp_to_dig_port(intel_dp)->base; - enum pipe pipe = crtc->pipe; - u32 pattern_val; - - switch (data->phy_pattern) { - case DP_LINK_QUAL_PATTERN_DISABLE: - drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); - if (DISPLAY_VER(dev_priv) >= 10) - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), - DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_LINK_TRAIN_NORMAL); - break; - case DP_LINK_QUAL_PATTERN_D10_2: - drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); - break; - case DP_LINK_QUAL_PATTERN_ERROR_RATE: - drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | - DDI_DP_COMP_CTL_SCRAMBLED_0); - break; - case DP_LINK_QUAL_PATTERN_PRBS7: - drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); - break; - case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: - /* - * FIXME: Ideally pattern should come from DPCD 0x250. As - * current firmware of DPR-100 could not set it, so hardcoding - * now for complaince test. - */ - drm_dbg_kms(&dev_priv->drm, - "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); - pattern_val = 0x3e0f83e0; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); - pattern_val = 0x0f83e0f8; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); - pattern_val = 0x0000f83e; - intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | - DDI_DP_COMP_CTL_CUSTOM80); - break; - case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: - /* - * FIXME: Ideally pattern should come from DPCD 0x24A. As - * current firmware of DPR-100 could not set it, so hardcoding - * now for complaince test. 
- */ - drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n"); - pattern_val = 0xFB; - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), - DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | - pattern_val); - break; - case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: - if (DISPLAY_VER(dev_priv) < 10) { - drm_warn(&dev_priv->drm, "Platform does not support TPS4\n"); - break; - } - drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n"); - intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), - DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); - break; - default: - drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n"); - } -} - -static void intel_dp_process_phy_request(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - u8 link_status[DP_LINK_STATUS_SIZE]; - - if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, - link_status) < 0) { - drm_dbg_kms(&i915->drm, "failed to get link status\n"); - return; - } - - /* retrieve vswing & pre-emphasis setting */ - intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, - link_status); - - intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); - - intel_dp_phy_pattern_update(intel_dp, crtc_state); - - drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, - intel_dp->train_set, crtc_state->lane_count); - - drm_dp_set_phy_test_pattern(&intel_dp->aux, data, - intel_dp->dpcd[DP_DPCD_REV]); -} - -static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - - if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { - drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); - return DP_TEST_NAK; - } - - /* Set test active flag here so userspace doesn't interrupt things */ - intel_dp->compliance.test_active = true; - - return DP_TEST_ACK; -} - -static void intel_dp_handle_test_request(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 response = DP_TEST_NAK; - u8 request = 0; - int status; - - status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); - if (status <= 0) { - drm_dbg_kms(&i915->drm, - "Could not read test request from sink\n"); - goto update_status; - } - - switch (request) { - case DP_TEST_LINK_TRAINING: - drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); - response = intel_dp_autotest_link_training(intel_dp); - break; - case DP_TEST_LINK_VIDEO_PATTERN: - drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); - response = intel_dp_autotest_video_pattern(intel_dp); - break; - case DP_TEST_LINK_EDID_READ: - drm_dbg_kms(&i915->drm, "EDID test requested\n"); - response = intel_dp_autotest_edid(intel_dp); - break; - case DP_TEST_LINK_PHY_TEST_PATTERN: - drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); - response = intel_dp_autotest_phy_pattern(intel_dp); - break; - default: - drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", - request); - break; - } - - if (response & DP_TEST_ACK) - intel_dp->compliance.test_type = request; - -update_status: - status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); - if (status <= 0) - drm_dbg_kms(&i915->drm, - "Could not write test 
response to sink\n"); -} - static bool intel_dp_link_ok(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) { @@ -5290,11 +5038,12 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return true; /* Retrain if link not ok */ - return !intel_dp_link_ok(intel_dp, link_status); + return !intel_dp_link_ok(intel_dp, link_status) && + !intel_psr_link_ok(intel_dp); } -static bool intel_dp_has_connector(struct intel_dp *intel_dp, - const struct drm_connector_state *conn_state) +bool intel_dp_has_connector(struct intel_dp *intel_dp, + const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder; @@ -5318,6 +5067,21 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp, return false; } +static void wait_for_connector_hw_done(const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_display *display = to_intel_display(connector); + + drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex); + + if (!conn_state->commit) + return; + + drm_WARN_ON(display->drm, + !wait_for_completion_timeout(&conn_state->commit->hw_done, + msecs_to_jiffies(5000))); +} + int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask) @@ -5354,10 +5118,7 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, if (!crtc_state->hw.active) continue; - if (conn_state->commit) - drm_WARN_ON(&i915->drm, - !wait_for_completion_timeout(&conn_state->commit->hw_done, - msecs_to_jiffies(5000))); + wait_for_connector_hw_done(conn_state); *pipe_mask |= BIT(crtc->pipe); } @@ -5366,6 +5127,11 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, return ret; } +void intel_dp_flush_connector_commits(struct intel_connector *connector) +{ + wait_for_connector_hw_done(connector->base.state); +} + static bool intel_dp_is_connected(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -5445,118 +5211,6 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp) intel_encoder_link_check_queue_work(encoder, 0); } -static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, - struct drm_modeset_acquire_ctx *ctx, - u8 *pipe_mask) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct drm_connector_list_iter conn_iter; - struct intel_connector *connector; - int ret = 0; - - *pipe_mask = 0; - - drm_connector_list_iter_begin(&i915->drm, &conn_iter); - for_each_intel_connector_iter(connector, &conn_iter) { - struct drm_connector_state *conn_state = - connector->base.state; - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - - if (!intel_dp_has_connector(intel_dp, conn_state)) - continue; - - crtc = to_intel_crtc(conn_state->crtc); - if (!crtc) - continue; - - ret = drm_modeset_lock(&crtc->base.mutex, ctx); - if (ret) - break; - - crtc_state = to_intel_crtc_state(crtc->base.state); - - drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); - - if (!crtc_state->hw.active) - continue; - - if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) - continue; - - *pipe_mask |= BIT(crtc->pipe); - } - drm_connector_list_iter_end(&conn_iter); - - return ret; -} - -static int intel_dp_do_phy_test(struct intel_encoder *encoder, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct 
intel_crtc *crtc; - u8 pipe_mask; - int ret; - - ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, - ctx); - if (ret) - return ret; - - ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); - if (ret) - return ret; - - if (pipe_mask == 0) - return 0; - - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", - encoder->base.base.id, encoder->base.name); - - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - /* test on the MST master transcoder */ - if (DISPLAY_VER(dev_priv) >= 12 && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && - !intel_dp_mst_is_master_trans(crtc_state)) - continue; - - intel_dp_process_phy_request(intel_dp, crtc_state); - break; - } - - return 0; -} - -void intel_dp_phy_test(struct intel_encoder *encoder) -{ - struct drm_modeset_acquire_ctx ctx; - int ret; - - drm_modeset_acquire_init(&ctx, 0); - - for (;;) { - ret = intel_dp_do_phy_test(encoder, &ctx); - - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - continue; - } - - break; - } - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - drm_WARN(encoder->base.dev, ret, - "Acquiring modeset locks failed with %i\n", ret); -} - static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -5572,7 +5226,7 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); if (val & DP_AUTOMATED_TEST_REQUEST) - intel_dp_handle_test_request(intel_dp); + intel_dp_test_request(intel_dp); if (val & DP_CP_IRQ) intel_hdcp_handle_cp_irq(intel_dp->attached_connector); @@ -5625,16 +5279,11 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 old_sink_count = intel_dp->sink_count; bool reprobe_needed = false; bool ret; - /* - * Clearing compliance test variables to allow capturing - * of values for next automated test request. - */ - memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + intel_dp_test_reset(intel_dp); /* * Now read the DPCD to see if it's actually running @@ -5659,24 +5308,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) intel_psr_short_pulse(intel_dp); - switch (intel_dp->compliance.test_type) { - case DP_TEST_LINK_TRAINING: - drm_dbg_kms(&dev_priv->drm, - "Link Training Compliance Test requested\n"); - /* Send a Hotplug Uevent to userspace to start modeset */ - drm_kms_helper_hotplug_event(&dev_priv->drm); - break; - case DP_TEST_LINK_PHY_TEST_PATTERN: - drm_dbg_kms(&dev_priv->drm, - "PHY test pattern Compliance Test requested\n"); - /* - * Schedule long hpd to do the test - * - * FIXME get rid of the ad-hoc phy test modeset code - * and properly incorporate it into the normal modeset. 
- */ + if (intel_dp_test_short_pulse(intel_dp)) reprobe_needed = true; - } return !reprobe_needed; } @@ -5962,23 +5595,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static void -intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ - if (!HAS_DSC(i915)) - return; - - if (intel_dp_is_edp(intel_dp)) - intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], - connector); - else - intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], - connector); -} - -static void intel_dp_detect_sdp_caps(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -6012,6 +5628,10 @@ intel_dp_detect(struct drm_connector *connector, if (!intel_display_driver_check_access(dev_priv)) return connector->status; + intel_dp_flush_connector_commits(intel_connector); + + intel_pps_vdd_on(intel_dp); + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); @@ -6033,7 +5653,7 @@ intel_dp_detect(struct drm_connector *connector, status = connector_status_disconnected; if (status == connector_status_disconnected) { - memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + intel_dp_test_reset(intel_dp); memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); intel_dp->psr.sink_panel_replay_support = false; intel_dp->psr.sink_panel_replay_su_support = false; @@ -6042,12 +5662,17 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_tunnel_disconnect(intel_dp); - goto out; + goto out_unset_edid; } + intel_dp_init_source_oui(intel_dp); + ret = intel_dp_tunnel_detect(intel_dp, ctx); - if (ret == -EDEADLK) - return ret; + if (ret == -EDEADLK) { + status = ret; + + goto out_vdd_off; + } if (ret == 1) intel_connector->base.epoch_counter++; @@ -6075,7 +5700,7 @@ intel_dp_detect(struct drm_connector *connector, * with EDID on it */ status = connector_status_disconnected; - goto out; + goto out_unset_edid; } /* @@ -6104,7 +5729,7 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_check_device_service_irq(intel_dp); -out: +out_unset_edid: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); @@ -6113,6 +5738,9 @@ out: status, intel_dp->dpcd, intel_dp->downstream_ports); +out_vdd_off: + intel_pps_vdd_off(intel_dp); + return status; } @@ -6471,7 +6099,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (dig_port->base.type == INTEL_OUTPUT_EDP && - (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { + (long_hpd || + intel_runtime_pm_suspended(&i915->runtime_pm) || + !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we @@ -6504,6 +6134,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) if (long_hpd) { intel_dp->reset_link_params = true; + intel_dp_invalidate_source_oui(intel_dp); + return IRQ_NONE; } @@ -6620,20 +6252,8 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp, struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum pipe pipe = INVALID_PIPE; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - /* - * Figure out the current pipe for the initial backlight setup. - * If the current pipe isn't valid, try the PPS pipe, and if that - * fails just assume pipe A. 
- */ - pipe = vlv_active_pipe(intel_dp); - - if (pipe != PIPE_A && pipe != PIPE_B) - pipe = intel_dp->pps.pps_pipe; - - if (pipe != PIPE_A && pipe != PIPE_B) - pipe = PIPE_A; - } + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + pipe = vlv_pps_backlight_initial_pipe(intel_dp); intel_backlight_setup(connector, pipe); } @@ -6801,6 +6421,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, out_vdd_off: intel_pps_vdd_off_sync(intel_dp); + intel_bios_fini_panel(&intel_connector->panel); return false; } @@ -6840,6 +6461,7 @@ bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { + struct intel_display *display = to_intel_display(dig_port); struct drm_connector *connector = &intel_connector->base; struct intel_dp *intel_dp = &dig_port->dp; struct intel_encoder *intel_encoder = &dig_port->base; @@ -6858,8 +6480,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, return false; intel_dp->reset_link_params = true; - intel_dp->pps.pps_pipe = INVALID_PIPE; - intel_dp->pps.active_pipe = INVALID_PIPE; /* Preserve the current hw state. */ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); @@ -6867,10 +6487,11 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { /* - * Currently we don't support eDP on TypeC ports, although in - * theory it could work on TypeC legacy ports. + * Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30, + * although in theory it could work on TypeC legacy ports. */ - drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder)); + drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder) && + DISPLAY_VER(dev_priv) < 30); type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; @@ -6887,7 +6508,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_set_default_max_sink_lane_count(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); + vlv_pps_pipe_init(intel_dp); intel_dp_aux_init(intel_dp); intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; @@ -6904,7 +6525,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) connector->interlace_allowed = true; - intel_connector->polled = DRM_CONNECTOR_POLL_HPD; + if (type != DRM_MODE_CONNECTOR_eDP) + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_connector->base.polled = intel_connector->polled; intel_connector_attach_encoder(intel_connector, intel_encoder); @@ -6930,7 +6552,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_add_properties(intel_dp, connector); - if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { + if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) { int ret = intel_dp_hdcp_init(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 1b9aaddd8c35..48f10876be65 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -37,9 +37,6 @@ struct link_config_limits { }; void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp); -void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct link_config_limits *limits); bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 
const struct drm_connector_state *conn_state); int intel_dp_min_bpp(enum intel_output_format output_format); @@ -57,6 +54,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask); +void intel_dp_flush_connector_commits(struct intel_connector *connector); void intel_dp_link_check(struct intel_encoder *encoder); void intel_dp_check_link_state(struct intel_dp *intel_dp); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); @@ -117,13 +115,13 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, bool intel_dp_source_supports_tps3(struct drm_i915_private *i915); bool intel_dp_source_supports_tps4(struct drm_i915_private *i915); -bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, int max_dprx_rate, int max_dprx_lanes); -bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner); +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, + int num_joined_pipes); bool intel_dp_has_joiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -142,7 +140,7 @@ int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay, - bool bigjoiner, + int num_joined_pipes, enum intel_output_format output_format, u32 pipe_bpp, u32 timeslots); @@ -152,10 +150,10 @@ int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector int bpc); u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, - bool bigjoiner); -bool intel_dp_need_joiner(struct intel_dp *intel_dp, - struct intel_connector *connector, - int hdisplay, int clock); + int num_joined_pipes); +int intel_dp_num_joined_pipes(struct intel_dp *intel_dp, + struct intel_connector *connector, + int hdisplay, int clock); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { @@ -190,8 +188,8 @@ void intel_dp_sync_state(struct intel_encoder *encoder, void intel_dp_check_frl_training(struct intel_dp *intel_dp); void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); -void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp); void intel_dp_wait_source_oui(struct intel_dp *intel_dp); int intel_dp_output_bpp(enum intel_output_format output_format, int bpp); @@ -204,4 +202,9 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector); bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder); +bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, + u8 lane_count); +bool intel_dp_has_connector(struct intel_dp *intel_dp, + const struct drm_connector_state *conn_state); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 3425b3643143..00c493cc8a4b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -19,6 +19,7 
@@ #include "intel_dp_hdcp.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) { @@ -57,7 +58,7 @@ static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); u8 aksv[DRM_HDCP_KSV_LEN] = {}; ssize_t dpcd_ret; @@ -65,7 +66,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to write An over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; @@ -81,7 +82,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV, aksv, DRM_HDCP_KSV_LEN); if (dpcd_ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to write Aksv over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; @@ -92,13 +93,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read Bksv from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -108,7 +109,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; /* @@ -119,7 +120,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -128,7 +129,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, static int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux, - struct drm_i915_private *i915, + struct intel_display *display, u8 *bcaps) { ssize_t ret; @@ -136,7 +137,7 @@ int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux, ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bcaps from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? 
-EIO : ret; } @@ -148,11 +149,11 @@ static int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps); if (ret) return ret; @@ -164,13 +165,14 @@ static int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { - drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", + drm_dbg_kms(display->drm, + "Read Ri' from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -181,14 +183,14 @@ static int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -200,7 +202,7 @@ static int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; int i; @@ -212,7 +214,7 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); if (ret != len) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read ksv[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? -EIO : ret; @@ -225,7 +227,7 @@ static int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) @@ -235,7 +237,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? 
-EIO : ret; } @@ -255,14 +257,14 @@ static bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return false; } @@ -274,11 +276,11 @@ static int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port, bool *hdcp_capable) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps); if (ret) return ret; @@ -341,7 +343,7 @@ static int intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, u8 *rx_status) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; @@ -350,7 +352,7 @@ intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_DP_RXSTATUS_LEN); if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -396,7 +398,7 @@ static ssize_t intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; @@ -429,7 +431,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, } if (ret) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "msg_id %d, ret %d, timeout(mSec): %d\n", hdcp2_msg_data->msg_id, ret, timeout); @@ -513,8 +515,8 @@ static int intel_dp_hdcp2_read_msg(struct intel_connector *connector, u8 msg_id, void *buf, size_t size) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct drm_dp_aux *aux = &dig_port->dp.aux; struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; @@ -567,7 +569,7 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, ret = drm_dp_dpcd_read(aux, offset, (void *)byte, len); if (ret < 0) { - drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", + drm_dbg_kms(display->drm, "msg_id %d, ret %zd\n", msg_id, ret); return ret; } @@ -580,7 +582,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, if (hdcp2_msg_data->msg_read_timeout > 0) { msg_expired = ktime_after(ktime_get_raw(), msg_end); if (msg_expired) { - drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n", + drm_dbg_kms(display->drm, + "msg_id %d, entire msg read timeout(mSec): %d\n", msg_id, hdcp2_msg_data->msg_read_timeout); return -ETIMEDOUT; } @@ -695,7 +698,7 @@ int 
intel_dp_hdcp_get_remote_capability(struct intel_connector *connector, bool *hdcp_capable, bool *hdcp2_capable) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_dp_aux *aux; u8 bcaps; int ret; @@ -708,10 +711,10 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector, aux = &connector->port->aux; ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable); if (ret) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2 DPCD capability read failed err: %d\n", ret); - ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps); + ret = intel_dp_hdcp_read_bcaps(aux, display, &bcaps); if (ret) return ret; @@ -744,8 +747,8 @@ static int intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; @@ -753,7 +756,7 @@ intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector, hdcp->stream_transcoder, enable, TRANS_DDI_HDCP_SELECT); if (ret) - drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n", + drm_err(display->drm, "%s HDCP stream select failed (%d)\n", enable ? "Enable" : "Disable", ret); return ret; } @@ -762,8 +765,8 @@ static int intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->stream_transcoder; @@ -779,11 +782,11 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, return -EINVAL; /* Wait for encryption confirmation */ - if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port), stream_enc_status, enable ? stream_enc_status : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", - transcoder_name(cpu_transcoder), enable ? 
"enabled" : "disabled"); + drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", + transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; } @@ -794,8 +797,8 @@ static int intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, bool enable) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->stream_transcoder; @@ -803,8 +806,8 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, enum port port = dig_port->base.port; int ret; - drm_WARN_ON(&i915->drm, enable && - !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port)) + drm_WARN_ON(display->drm, enable && + !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port)) & AUTH_STREAM_TYPE) != data->streams[0].stream_type); ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable); @@ -812,12 +815,12 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return ret; /* Wait for encryption confirmation */ - if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe), + if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe), STREAM_ENCRYPTION_STATUS, enable ? STREAM_ENCRYPTION_STATUS : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", - transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled"); + drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", + transcoder_name(cpu_transcoder), str_enabled_disabled(enable)); return -ETIMEDOUT; } @@ -872,13 +875,12 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { int intel_dp_hdcp_init(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_encoder *intel_encoder = &dig_port->base; enum port port = intel_encoder->port; struct intel_dp *intel_dp = &dig_port->dp; - if (!is_hdcp_supported(dev_priv, port)) + if (!is_hdcp_supported(display, port)) return 0; if (intel_connector->mst_port) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 40bedc31d6bf..397cc4ebae52 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,6 +21,8 @@ * IN THE SOFTWARE. 
*/ +#include <linux/debugfs.h> + #include <drm/display/drm_dp_helper.h> #include "i915_drv.h" @@ -208,8 +210,10 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd); - for (i = 0; i < lttpr_count; i++) + for (i = 0; i < lttpr_count; i++) { intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i)); + drm_dp_dump_lttpr_desc(&intel_dp->aux, DP_PHY_LTTPR(i)); + } return lttpr_count; } @@ -1677,19 +1681,11 @@ void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); } -static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *connector) -{ - if (connector->mst_port) - return connector->mst_port; - else - return enc_to_intel_dp(intel_attached_encoder(connector)); -} - static int i915_dp_force_link_rate_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int current_rate = -1; int force_rate; int err; @@ -1760,7 +1756,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file, struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int rate; int err; @@ -1787,7 +1783,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int current_lane_count = -1; int force_lane_count; int err; @@ -1862,7 +1858,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file, struct seq_file *m = file->private_data; struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int lane_count; int err; @@ -1889,7 +1885,7 @@ static int i915_dp_max_link_rate_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1908,7 +1904,7 @@ static int i915_dp_max_lane_count_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1927,7 +1923,7 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = 
to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1945,7 +1941,7 @@ static int i915_dp_force_link_training_failure_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; if (val > 2) @@ -1969,7 +1965,7 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -1987,7 +1983,7 @@ static int i915_dp_force_link_retrain_write(void *data, u64 val) { struct intel_connector *connector = to_intel_connector(data); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); @@ -2010,7 +2006,7 @@ static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data) { struct intel_connector *connector = to_intel_connector(m->private); struct intel_display *display = to_intel_display(connector); - struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); int err; err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index eeaedd979354..5bba078c00d8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -41,9 +41,10 @@ #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" +#include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_test.h" #include "intel_dp_tunnel.h" -#include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -152,7 +153,7 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int num_joined_pipes = crtc_state->joiner_pipes; + int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state); return intel_dp_dsc_get_slice_count(connector, adjusted_mode->clock, @@ -559,7 +560,7 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp, */ limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24); - intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); + intel_dp_test_compute_config(intel_dp, crtc_state, limits); if (!intel_dp_compute_config_link_bpp_limits(intel_dp, crtc_state, @@ -588,6 +589,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct link_config_limits limits; bool dsc_needed, joiner_needs_dsc; + int num_joined_pipes; int ret = 0; if (pipe_config->fec_enable && @@ -597,16 +599,17 @@ static int 
intel_dp_mst_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; - if (intel_dp_need_joiner(intel_dp, connector, - adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_clock)) - pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock); + if (num_joined_pipes > 1) + pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe); pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->joiner_pipes); + joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_mst_compute_config_limits(intel_dp, @@ -1005,6 +1008,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; @@ -1021,6 +1025,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_crtc *pipe_crtc; bool last_mst_stream; + int i; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; @@ -1028,8 +1033,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, DISPLAY_VER(dev_priv) >= 12 && last_mst_stream && !intel_dp_mst_is_master_trans(old_crtc_state)); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1053,8 +1057,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(old_crtc_state)) { + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { const struct intel_crtc_state *old_pipe_crtc_state = intel_atomic_get_old_crtc_state(state, pipe_crtc); @@ -1263,6 +1266,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; @@ -1273,7 +1277,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, enum transcoder trans = pipe_config->cpu_transcoder; bool first_mst_stream = intel_dp->active_mst_links == 1; struct intel_crtc *pipe_crtc; - int ret; + int ret, i; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); @@ -1320,8 +1324,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_enable_transcoder(pipe_config); - for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, - intel_crtc_joined_pipe_mask(pipe_config)) 
{ + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) { const struct intel_crtc_state *pipe_crtc_state = intel_atomic_get_new_crtc_state(state, pipe_crtc); @@ -1442,10 +1445,11 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; int max_rate, mode_rate, max_lanes, max_link_clock; int ret; - bool dsc = false, joiner = false; + bool dsc = false; u16 dsc_max_compressed_bpp = 0; u8 dsc_slice_count = 0; int target_clock = mode->clock; + int num_joined_pipes; if (drm_connector_is_unregistered(connector)) { *status = MODE_ERROR; @@ -1485,11 +1489,9 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, * corresponding link capabilities of the sink) in case the * stream is uncompressed for it by the last branch device. */ - if (intel_dp_need_joiner(intel_dp, intel_connector, - mode->hdisplay, target_clock)) { - joiner = true; - max_dotclk *= 2; - } + num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector, + mode->hdisplay, target_clock); + max_dotclk *= num_joined_pipes; ret = drm_modeset_lock(&mgr->base.lock, ctx); if (ret) @@ -1515,20 +1517,20 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_lanes, target_clock, mode->hdisplay, - joiner, + num_joined_pipes, INTEL_OUTPUT_FORMAT_RGB, pipe_bpp, 64); dsc_slice_count = intel_dp_dsc_get_slice_count(intel_connector, target_clock, mode->hdisplay, - joiner); + num_joined_pipes); } dsc = dsc_max_compressed_bpp && dsc_slice_count; } - if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) { + if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) { *status = MODE_CLOCK_HIGH; return 0; } @@ -1538,7 +1540,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, return 0; } - *status = intel_mode_valid_max_plane_size(dev_priv, mode, joiner); + *status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes); return 0; } @@ -1571,6 +1573,8 @@ intel_dp_mst_detect(struct drm_connector *connector, if (!intel_display_driver_check_access(i915)) return connector->status; + intel_dp_flush_connector_commits(intel_connector); + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, intel_connector->port); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c new file mode 100644 index 000000000000..e05819300d77 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_test.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <linux/debugfs.h> + +#include <drm/display/drm_dp.h> +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_ddi.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dp_mst.h" +#include "intel_dp_test.h" + +void intel_dp_test_reset(struct intel_dp *intel_dp) +{ + /* + * Clearing compliance test variables to allow capturing + * of values for next automated test request. + */ + memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); +} + +/* Adjust link config limits based on compliance test requests. 
*/ +void intel_dp_test_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits) +{ + struct intel_display *display = to_intel_display(intel_dp); + + /* For DP Compliance we override the computed bpp for the pipe */ + if (intel_dp->compliance.test_data.bpc != 0) { + int bpp = 3 * intel_dp->compliance.test_data.bpc; + + limits->pipe.min_bpp = bpp; + limits->pipe.max_bpp = bpp; + pipe_config->dither_force_disable = bpp == 6 * 3; + + drm_dbg_kms(display->drm, "Setting pipe_bpp to %d\n", bpp); + } + + /* Use values requested by Compliance Test Request */ + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { + int index; + + /* Validate the compliance test data since max values + * might have changed due to link train fallback. + */ + if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, + intel_dp->compliance.test_lane_count)) { + index = intel_dp_rate_index(intel_dp->common_rates, + intel_dp->num_common_rates, + intel_dp->compliance.test_link_rate); + if (index >= 0) { + limits->min_rate = intel_dp->compliance.test_link_rate; + limits->max_rate = intel_dp->compliance.test_link_rate; + } + limits->min_lane_count = intel_dp->compliance.test_lane_count; + limits->max_lane_count = intel_dp->compliance.test_lane_count; + } + } +} + +/* Compliance test status bits */ +#define INTEL_DP_RESOLUTION_PREFERRED 1 +#define INTEL_DP_RESOLUTION_STANDARD 2 +#define INTEL_DP_RESOLUTION_FAILSAFE 3 + +static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + int status = 0; + int test_link_rate; + u8 test_lane_count, test_link_bw; + /* (DP CTS 1.2) + * 4.3.1.11 + */ + /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, + &test_lane_count); + + if (status <= 0) { + drm_dbg_kms(display->drm, "Lane count read failed\n"); + return DP_TEST_NAK; + } + test_lane_count &= DP_MAX_LANE_COUNT_MASK; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, + &test_link_bw); + if (status <= 0) { + drm_dbg_kms(display->drm, "Link Rate read failed\n"); + return DP_TEST_NAK; + } + test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); + + /* Validate the requested link rate and lane count */ + if (!intel_dp_link_params_valid(intel_dp, test_link_rate, + test_lane_count)) + return DP_TEST_NAK; + + intel_dp->compliance.test_lane_count = test_lane_count; + intel_dp->compliance.test_link_rate = test_link_rate; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + u8 test_pattern; + u8 test_misc; + __be16 h_width, v_height; + int status = 0; + + /* Read the TEST_PATTERN (DP CTS 3.1.5) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, + &test_pattern); + if (status <= 0) { + drm_dbg_kms(display->drm, "Test pattern read failed\n"); + return DP_TEST_NAK; + } + if (test_pattern != DP_COLOR_RAMP) + return DP_TEST_NAK; + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, + &h_width, 2); + if (status <= 0) { + drm_dbg_kms(display->drm, "H Width read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, + &v_height, 2); + if (status <= 0) { + drm_dbg_kms(display->drm, "V Height read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, +
&test_misc); + if (status <= 0) { + drm_dbg_kms(display->drm, "TEST MISC read failed\n"); + return DP_TEST_NAK; + } + if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) + return DP_TEST_NAK; + if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) + return DP_TEST_NAK; + switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { + case DP_TEST_BIT_DEPTH_6: + intel_dp->compliance.test_data.bpc = 6; + break; + case DP_TEST_BIT_DEPTH_8: + intel_dp->compliance.test_data.bpc = 8; + break; + default: + return DP_TEST_NAK; + } + + intel_dp->compliance.test_data.video_pattern = test_pattern; + intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); + intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + u8 test_result = DP_TEST_ACK; + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + + if (!intel_connector->detect_edid || connector->edid_corrupt || + intel_dp->aux.i2c_defer_count > 6) { + /* Check EDID read for NACKs, DEFERs and corruption + * (DP CTS 1.2 Core r1.1) + * 4.2.2.4 : Failed EDID read, I2C_NAK + * 4.2.2.5 : Failed EDID read, I2C_DEFER + * 4.2.2.6 : EDID corruption detected + * Use failsafe mode for all cases + */ + if (intel_dp->aux.i2c_nack_count > 0 || + intel_dp->aux.i2c_defer_count > 0) + drm_dbg_kms(display->drm, + "EDID read had %d NACKs, %d DEFERs\n", + intel_dp->aux.i2c_nack_count, + intel_dp->aux.i2c_defer_count); + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; + } else { + /* FIXME: Get rid of drm_edid_raw() */ + const struct edid *block = drm_edid_raw(intel_connector->detect_edid); + + /* We have to write the checksum of the last block read */ + block += block->extensions; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, + block->checksum) <= 0) + drm_dbg_kms(display->drm, + "Failed to write EDID checksum\n"); + + test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; + } + + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return test_result; +} + +static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe = crtc->pipe; + u32 pattern_val; + + switch (data->phy_pattern) { + case DP_LINK_QUAL_PATTERN_DISABLE: + drm_dbg_kms(display->drm, "Disable Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0); + if (DISPLAY_VER(display) >= 10) + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_NORMAL); + break; + case DP_LINK_QUAL_PATTERN_D10_2: + drm_dbg_kms(display->drm, "Set D10.2 Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); + break; + case DP_LINK_QUAL_PATTERN_ERROR_RATE: + drm_dbg_kms(display->drm, + "Set Error 
Count Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_SCRAMBLED_0); + break; + case DP_LINK_QUAL_PATTERN_PRBS7: + drm_dbg_kms(display->drm, "Set PRBS7 Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); + break; + case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: + /* + * FIXME: Ideally pattern should come from DPCD 0x250. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for compliance test. + */ + drm_dbg_kms(display->drm, + "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); + pattern_val = 0x3e0f83e0; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 0), pattern_val); + pattern_val = 0x0f83e0f8; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 1), pattern_val); + pattern_val = 0x0000f83e; + intel_de_write(display, DDI_DP_COMP_PAT(pipe, 2), pattern_val); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | + DDI_DP_COMP_CTL_CUSTOM80); + break; + case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: + /* + * FIXME: Ideally pattern should come from DPCD 0x24A. As + * current firmware of DPR-100 could not set it, so hardcoding + * now for compliance test. + */ + drm_dbg_kms(display->drm, + "Set HBR2 compliance Phy Test Pattern\n"); + pattern_val = 0xFB; + intel_de_write(display, DDI_DP_COMP_CTL(pipe), + DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | + pattern_val); + break; + case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: + if (DISPLAY_VER(display) < 10) { + drm_warn(display->drm, + "Platform does not support TPS4\n"); + break; + } + drm_dbg_kms(display->drm, + "Set TPS4 compliance Phy Test Pattern\n"); + intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0); + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); + break; + default: + drm_warn(display->drm, "Invalid Phy Test Pattern\n"); + } +} + +static void intel_dp_process_phy_request(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, + link_status) < 0) { + drm_dbg_kms(display->drm, "failed to get link status\n"); + return; + } + + /* retrieve vswing & pre-emphasis setting */ + intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, + link_status); + + intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); + + intel_dp_phy_pattern_update(intel_dp, crtc_state); + + drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, + intel_dp->train_set, crtc_state->lane_count); + + drm_dp_set_phy_test_pattern(&intel_dp->aux, data, + intel_dp->dpcd[DP_DPCD_REV]); +} + +static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; + + if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { + drm_dbg_kms(display->drm, + "DP Phy Test pattern AUX read failure\n"); + return DP_TEST_NAK; + } + + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; + + return DP_TEST_ACK; +} + +void intel_dp_test_request(struct intel_dp *intel_dp) +{ + struct intel_display *display
= to_intel_display(intel_dp); + u8 response = DP_TEST_NAK; + u8 request = 0; + int status; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + drm_dbg_kms(display->drm, + "Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + drm_dbg_kms(display->drm, "LINK_TRAINING test requested\n"); + response = intel_dp_autotest_link_training(intel_dp); + break; + case DP_TEST_LINK_VIDEO_PATTERN: + drm_dbg_kms(display->drm, "TEST_PATTERN test requested\n"); + response = intel_dp_autotest_video_pattern(intel_dp); + break; + case DP_TEST_LINK_EDID_READ: + drm_dbg_kms(display->drm, "EDID test requested\n"); + response = intel_dp_autotest_edid(intel_dp); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + drm_dbg_kms(display->drm, "PHY_PATTERN test requested\n"); + response = intel_dp_autotest_phy_pattern(intel_dp); + break; + default: + drm_dbg_kms(display->drm, "Invalid test request '%02x'\n", + request); + break; + } + + if (response & DP_TEST_ACK) + intel_dp->compliance.test_type = request; + +update_status: + status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + drm_dbg_kms(display->drm, + "Could not write test response to sink\n"); +} + +/* phy test */ + +static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; + int ret = 0; + + *pipe_mask = 0; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state = + connector->base.state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!intel_dp_has_connector(intel_dp, conn_state)) + continue; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + continue; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + break; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + drm_WARN_ON(display->drm, + !intel_crtc_has_dp_encoder(crtc_state)); + + if (!crtc_state->hw.active) + continue; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + continue; + + *pipe_mask |= BIT(crtc->pipe); + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int intel_dp_do_phy_test(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_display *display = to_intel_display(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_crtc *crtc; + u8 pipe_mask; + int ret; + + ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, + ctx); + if (ret) + return ret; + + ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); + if (ret) + return ret; + + if (pipe_mask == 0) + return 0; + + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] PHY test\n", + encoder->base.base.id, encoder->base.name); + + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + /* test on the MST master transcoder */ + if (DISPLAY_VER(display) >= 12 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && + !intel_dp_mst_is_master_trans(crtc_state)) + continue; + + intel_dp_process_phy_request(intel_dp, crtc_state); + break; + } + + return 0; +} + +bool intel_dp_test_phy(struct intel_dp *intel_dp) +{ 
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; + struct drm_modeset_acquire_ctx ctx; + int ret; + + if (!intel_dp->compliance.test_active || + intel_dp->compliance.test_type != DP_TEST_LINK_PHY_TEST_PATTERN) + return false; + + drm_modeset_acquire_init(&ctx, 0); + + for (;;) { + ret = intel_dp_do_phy_test(encoder, &ctx); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + continue; + } + + break; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_WARN(encoder->base.dev, ret, + "Acquiring modeset locks failed with %i\n", ret); + + return true; +} + +bool intel_dp_test_short_pulse(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + bool reprobe_needed = false; + + switch (intel_dp->compliance.test_type) { + case DP_TEST_LINK_TRAINING: + drm_dbg_kms(display->drm, + "Link Training Compliance Test requested\n"); + /* Send a Hotplug Uevent to userspace to start modeset */ + drm_kms_helper_hotplug_event(display->drm); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + drm_dbg_kms(display->drm, + "PHY test pattern Compliance Test requested\n"); + /* + * Schedule long hpd to do the test + * + * FIXME get rid of the ad-hoc phy test modeset code + * and properly incorporate it into the normal modeset. + */ + reprobe_needed = true; + } + + return reprobe_needed; +} + +static ssize_t i915_displayport_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_display *display = m->private; + char *input_buffer; + int status = 0; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + int val = 0; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + drm_dbg_kms(display->drm, "Copied %d bytes from user\n", (unsigned int)len); + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) + break; + drm_dbg_kms(display->drm, "Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + intel_dp->compliance.test_active = true; + else + intel_dp->compliance.test_active = false; + } + } + drm_connector_list_iter_end(&conn_iter); + kfree(input_buffer); + if (status < 0) + return status; + + *offp += len; + return len; +} + +static int i915_displayport_test_active_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == 
INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_active) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static int i915_displayport_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, i915_displayport_test_active_show, + inode->i_private); +} + +static const struct file_operations i915_displayport_test_active_fops = { + .owner = THIS_MODULE, + .open = i915_displayport_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_displayport_test_active_write +}; + +static int i915_displayport_test_data_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_type == + DP_TEST_LINK_EDID_READ) + seq_printf(m, "%lx", + intel_dp->compliance.test_data.edid); + else if (intel_dp->compliance.test_type == + DP_TEST_LINK_VIDEO_PATTERN) { + seq_printf(m, "hdisplay: %d\n", + intel_dp->compliance.test_data.hdisplay); + seq_printf(m, "vdisplay: %d\n", + intel_dp->compliance.test_data.vdisplay); + seq_printf(m, "bpc: %u\n", + intel_dp->compliance.test_data.bpc); + } else if (intel_dp->compliance.test_type == + DP_TEST_LINK_PHY_TEST_PATTERN) { + seq_printf(m, "pattern: %d\n", + intel_dp->compliance.test_data.phytest.phy_pattern); + seq_printf(m, "Number of lanes: %d\n", + intel_dp->compliance.test_data.phytest.num_lanes); + seq_printf(m, "Link Rate: %d\n", + intel_dp->compliance.test_data.phytest.link_rate); + seq_printf(m, "level: %02x\n", + intel_dp->train_set[0]); + } + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); + +static int i915_displayport_test_type_show(struct seq_file *m, void *data) +{ + struct intel_display *display = m->private; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(display->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); + } else { + seq_puts(m, "0"); + } + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); + +static const struct { + const char *name; + const struct file_operations *fops; +} intel_display_debugfs_files[] = { + {"i915_dp_test_data", &i915_displayport_test_data_fops}, + 
{"i915_dp_test_type", &i915_displayport_test_type_fops}, + {"i915_dp_test_active", &i915_displayport_test_active_fops}, +}; + +void intel_dp_test_debugfs_register(struct intel_display *display) +{ + struct drm_minor *minor = display->drm->primary; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { + debugfs_create_file(intel_display_debugfs_files[i].name, + 0644, + minor->debugfs_root, + display, + intel_display_debugfs_files[i].fops); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.h b/drivers/gpu/drm/i915/display/intel_dp_test.h new file mode 100644 index 000000000000..dcc167e4c7f6 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_test.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_DP_TEST_H__ +#define __INTEL_DP_TEST_H__ + +#include <linux/types.h> + +struct intel_crtc_state; +struct intel_display; +struct intel_dp; +struct link_config_limits; + +void intel_dp_test_reset(struct intel_dp *intel_dp); +void intel_dp_test_request(struct intel_dp *intel_dp); +void intel_dp_test_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits); +bool intel_dp_test_phy(struct intel_dp *intel_dp); +bool intel_dp_test_short_pulse(struct intel_dp *intel_dp); +void intel_dp_test_debugfs_register(struct intel_display *display); + +#endif /* __INTEL_DP_TEST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h index a0c00b7d3303..e9314cf25a19 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h @@ -20,7 +20,7 @@ struct intel_dp; struct intel_encoder; struct intel_link_bw_limits; -#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) +#if IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx); void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index d20e4e9cf7f7..0f12f2c3467c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -219,8 +219,10 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = { }; static const struct bxt_dpio_phy_info * -bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) +bxt_get_phy_list(struct intel_display *display, int *count) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (IS_GEMINILAKE(dev_priv)) { *count = ARRAY_SIZE(glk_dpio_phy_info); return glk_dpio_phy_info; @@ -231,22 +233,22 @@ bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) } static const struct bxt_dpio_phy_info * -bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) +bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy) { int count; const struct bxt_dpio_phy_info *phy_list = - bxt_get_phy_list(dev_priv, &count); + bxt_get_phy_list(display, &count); return &phy_list[phy]; } -void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { const struct bxt_dpio_phy_info *phy_info, *phys; int i, count; - phys = bxt_get_phy_list(dev_priv, &count); + phys = bxt_get_phy_list(display, &count); for (i = 0; i < count; i++) { phy_info = &phys[i]; @@ 
-265,7 +267,7 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, } } - drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c", + drm_WARN(display->drm, 1, "PHY not found for PORT %c", port_name(port)); *phy = DPIO_PHY0; *ch = DPIO_CH0; @@ -275,16 +277,16 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, * Like intel_de_rmw() but reads from a single per-lane register and * writes to the group register to write the same value to all the lanes. */ -static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915, +static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display, i915_reg_t reg_single, i915_reg_t reg_group, u32 clear, u32 set) { u32 old, val; - old = intel_de_read(i915, reg_single); + old = intel_de_read(display, reg_single); val = (old & ~clear) | set; - intel_de_write(i915, reg_group, val); + intel_de_write(display, reg_group, val); return old; } @@ -292,30 +294,30 @@ static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915, void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; enum dpio_channel ch; enum dpio_phy phy; int lane, n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; - bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch); + bxt_port_to_phy_channel(display, encoder->port, &phy, &ch); /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. */ - bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch), BXT_PORT_PCS_DW10_GRP(phy, ch), TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0); for (lane = 0; lane < crtc_state->lane_count; lane++) { int level = intel_ddi_level(encoder, crtc_state, lane); - intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane), MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK, MARGIN_000(trans->entries[level].bxt.margin) | UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale)); @@ -325,50 +327,50 @@ void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, int level = intel_ddi_level(encoder, crtc_state, lane); u32 val; - intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane), SCALE_DCOMP_METHOD, trans->entries[level].bxt.enable ? 
SCALE_DCOMP_METHOD : 0); - val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane)); + val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane)); if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Disabled scaling while ouniqetrangenmethod was set"); } for (lane = 0; lane < crtc_state->lane_count; lane++) { int level = intel_ddi_level(encoder, crtc_state, lane); - intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane), DE_EMPHASIS_MASK, DE_EMPHASIS(trans->entries[level].bxt.deemphasis)); } - bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch), BXT_PORT_PCS_DW10_GRP(phy, ch), 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); } -bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) + if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) return false; - if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) & + if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) & (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) { - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "DDI PHY %d powered, but power hasn't settled\n", phy); return false; } - if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { - drm_dbg(&dev_priv->drm, + if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { + drm_dbg(display->drm, "DDI PHY %d powered, but still in reset\n", phy); return false; @@ -377,47 +379,44 @@ bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, return true; } -static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy) { - u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy)); + u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy)); return REG_FIELD_GET(GRC_CODE_MASK, val); } -static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, +static void bxt_phy_wait_grc_done(struct intel_display *display, enum dpio_phy phy) { - if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy), - GRC_DONE, 10)) - drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n", - phy); + if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) + drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy); } -static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; u32 val; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - if (bxt_dpio_phy_is_enabled(dev_priv, phy)) { + if (bxt_dpio_phy_is_enabled(display, phy)) { /* Still read out the GRC value for state verification */ if (phy_info->rcomp_phy != -1) - dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy); + display->state.bxt_phy_grc = bxt_get_grc(display, phy); - if (bxt_dpio_phy_verify_state(dev_priv, phy)) { - drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, " + if (bxt_dpio_phy_verify_state(display, phy)) { + drm_dbg(display->drm, "DDI PHY %d 
already enabled, " "won't reprogram it\n", phy); return; } - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "DDI PHY %d enabled with invalid state, " "force reprogramming it\n", phy); } - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); /* * The PHY registers start out inaccessible and respond to reads with @@ -427,92 +426,91 @@ static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv, * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. */ - if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy), + if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) - drm_err(&dev_priv->drm, "timeout during PHY%d power on\n", + drm_err(display->drm, "timeout during PHY%d power on\n", phy); /* Program PLL Rcomp code offset */ - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), + intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4)); - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), + intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4)); /* Program power gating */ - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, + intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0, OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); if (phy_info->dual_channel) - intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, + intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0, DW6_OLDO_DYN_PWR_DOWN_EN); if (phy_info->rcomp_phy != -1) { u32 grc_code; - bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); + bxt_phy_wait_grc_done(display, phy_info->rcomp_phy); /* * PHY0 isn't connected to an RCOMP resistor so copy over * the corresponding calibrated value from PHY1, and disable * the automatic calibration on PHY0. 
*/ - val = bxt_get_grc(dev_priv, phy_info->rcomp_phy); - dev_priv->display.state.bxt_phy_grc = val; + val = bxt_get_grc(display, phy_info->rcomp_phy); + display->state.bxt_phy_grc = val; grc_code = GRC_CODE_FAST(val) | GRC_CODE_SLOW(val) | GRC_CODE_NOM(val); - intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); - intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), + intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code); + intel_de_rmw(display, BXT_PORT_REF_DW8(phy), 0, GRC_DIS | GRC_RDY_OVRD); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); - intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); + intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } -void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); - intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); + intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); } -void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { - const struct bxt_dpio_phy_info *phy_info = - bxt_get_phy_info(dev_priv, phy); + const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy); enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; - lockdep_assert_held(&dev_priv->display.power.domains.lock); + lockdep_assert_held(&display->power.domains.lock); was_enabled = true; if (rcomp_phy != -1) - was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy); + was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy); /* * We need to copy the GRC calibration value from rcomp_phy, * so make sure it's powered up. */ if (!was_enabled) - _bxt_dpio_phy_init(dev_priv, rcomp_phy); + _bxt_dpio_phy_init(display, rcomp_phy); - _bxt_dpio_phy_init(dev_priv, phy); + _bxt_dpio_phy_init(display, phy); if (!was_enabled) - bxt_dpio_phy_uninit(dev_priv, rcomp_phy); + bxt_dpio_phy_uninit(display, rcomp_phy); } static bool __printf(6, 7) -__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, +__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy, i915_reg_t reg, u32 mask, u32 expected, const char *reg_fmt, ...) 
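/*
 * Editorial sketch of the GRC hand-off above (hedged: condensed from the
 * patched helpers; the helper name is hypothetical and the rcomp PHY is
 * assumed to be DPIO_PHY1, per the comment in _bxt_dpio_phy_init()).
 * PHY0 has no RCOMP resistor, so its resistor calibration code is cloned
 * from PHY1 and its own calibration is overridden as "done":
 */
static void bxt_phy0_clone_grc(struct intel_display *display)
{
	u32 val, grc_code;

	/* read PHY1's calibrated resistor code... */
	val = REG_FIELD_GET(GRC_CODE_MASK,
			    intel_de_read(display, BXT_PORT_REF_DW6(DPIO_PHY1)));

	/* ...program it into all three code fields of PHY0... */
	grc_code = GRC_CODE_FAST(val) | GRC_CODE_SLOW(val) | GRC_CODE_NOM(val);
	intel_de_write(display, BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);

	/* ...and disable/fake-complete PHY0's own (impossible) calibration */
	intel_de_rmw(display, BXT_PORT_REF_DW8(DPIO_PHY0), 0,
		     GRC_DIS | GRC_RDY_OVRD);
}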
{ @@ -520,7 +518,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, va_list args; u32 val; - val = intel_de_read(dev_priv, reg); + val = intel_de_read(display, reg); if ((val & mask) == expected) return true; @@ -528,7 +526,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, vaf.fmt = reg_fmt; vaf.va = &args; - drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " + drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " "current %08x, expected %08x (mask %08x)\n", phy, &vaf, reg.reg, val, (val & ~mask) | expected, mask); @@ -538,20 +536,20 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, return false; } -bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy) { const struct bxt_dpio_phy_info *phy_info; u32 mask; bool ok; - phy_info = bxt_get_phy_info(dev_priv, phy); + phy_info = bxt_get_phy_info(display, phy); #define _CHK(reg, mask, exp, fmt, ...) \ - __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ + __phy_reg_verify_state(display, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) - if (!bxt_dpio_phy_is_enabled(dev_priv, phy)) + if (!bxt_dpio_phy_is_enabled(display, phy)) return false; ok = true; @@ -575,7 +573,7 @@ bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, "BXT_PORT_CL2CM_DW6(%d)", phy); if (phy_info->rcomp_phy != -1) { - u32 grc_code = dev_priv->display.state.bxt_phy_grc; + u32 grc_code = display->state.bxt_phy_grc; grc_code = GRC_CODE_FAST(grc_code) | GRC_CODE_SLOW(grc_code) | @@ -614,20 +612,20 @@ bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count) void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 lane_lat_optim_mask) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { /* * Note that on CHV this flag is called UPAR, but has * the same function. */ - intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), + intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane), LATENCY_OPTIM, lane_lat_optim_mask & BIT(lane) ? 
LATENCY_OPTIM : 0); } @@ -636,18 +634,18 @@ void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; u8 mask; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); mask = 0; for (lane = 0; lane < 4; lane++) { - u32 val = intel_de_read(dev_priv, + u32 val = intel_de_read(display, BXT_PORT_TX_DW14_LN(phy, ch, lane)); if (val & LATENCY_OPTIM) diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 226994dcb89b..a82939165546 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -10,9 +10,9 @@ enum pipe; enum port; -struct drm_i915_private; struct intel_crtc_state; struct intel_digital_port; +struct intel_display; struct intel_encoder; enum dpio_channel { @@ -27,15 +27,15 @@ enum dpio_phy { }; #ifdef I915 -void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch); void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); -void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); -void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); -bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy); +void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy); +bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy); -bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy); u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count); void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, @@ -73,7 +73,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); #else -static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, +static inline void bxt_port_to_phy_channel(struct intel_display *display, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { } @@ -81,18 +81,18 @@ static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { } -static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy) { } -static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy) { } -static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, +static inline bool bxt_dpio_phy_is_enabled(struct intel_display *display, enum dpio_phy phy) { return false; } -static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, +static inline bool bxt_dpio_phy_verify_state(struct intel_display *display, enum dpio_phy phy) { return true; diff 
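/*
 * Editorial aside (hedged): the header above uses the usual i915/xe
 * split - real prototypes under #ifdef I915, empty inline stubs
 * otherwise - so display code shared with the xe driver can call the
 * BXT PHY helpers unconditionally.  A hypothetical caller (not from the
 * patch) compiles either way; with the stubs, bxt_dpio_phy_verify_state()
 * returns true and the whole body folds away:
 */
static void sanitize_bxt_phy(struct intel_display *display)
{
	if (!bxt_dpio_phy_verify_state(display, DPIO_PHY0))
		bxt_dpio_phy_init(display, DPIO_PHY0);
}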
--git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 340dfce480b8..198ceda790d2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -589,11 +589,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) return false; - if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv)) + if (!IS_PINEVIEW(dev_priv) && + !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) if (clock->m1 <= clock->m2) return false; - if (!IS_LP(dev_priv)) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) return false; if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -780,7 +783,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, max_n = limit->n.max; /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { - /* based on hardware requirement, prefere larger m1,m2 */ + /* based on hardware requirement, prefer larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = limit->m2.max; @@ -1000,6 +1003,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1058,7 +1062,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1092,6 +1096,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1128,7 +1133,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, dpll |= DPLL_DVO_2X_MODE; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1234,11 +1239,12 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state, static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) || + ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915)))) return 25; @@ -1268,6 +1274,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { + struct intel_display *display = to_intel_display(crtc_state); struct 
intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; @@ -1329,7 +1336,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, WARN_ON(reduced_clock->p2 != clock->p2); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv)) + intel_panel_use_ssc(display)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -1353,6 +1360,7 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, static int ilk_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1365,7 +1373,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, return 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", dev_priv->display.vbt.lvds_ssc_freq); @@ -1529,6 +1537,7 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state, static int g4x_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1536,7 +1545,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1578,6 +1587,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, static int pnv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1585,7 +1595,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1616,6 +1626,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1623,7 +1634,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -1656,6 +1667,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, static int 
i8xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1663,7 +1675,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, int refclk = 48000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) { + if (intel_panel_use_ssc(display)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", @@ -2212,7 +2224,8 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); struct intel_crtc_state *crtc_state; crtc_state = intel_crtc_state_alloc(crtc); @@ -2318,12 +2331,13 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) static void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - cur_state = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index f490b2157828..e60497bb8a94 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -173,18 +173,19 @@ void assert_shared_dpll(struct drm_i915_private *i915, struct intel_shared_dpll *pll, bool state) { + struct intel_display *display = &i915->display; bool cur_state; struct intel_dpll_hw_state hw_state; - if (drm_WARN(&i915->drm, !pll, + if (drm_WARN(display->drm, !pll, "asserting DPLL %s with no DPLL\n", str_on_off(state))) return; cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state); - I915_STATE_WARN(i915, cur_state != state, - "%s assertion failure (expected %s, current %s)\n", - pll->info->name, str_on_off(state), - str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "%s assertion failure (expected %s, current %s)\n", + pll->info->name, str_on_off(state), + str_on_off(cur_state)); } static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) @@ -545,14 +546,15 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915, static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; u32 val; bool enabled; - val = intel_de_read(i915, PCH_DREF_CONTROL); + val = intel_de_read(display, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); - I915_STATE_WARN(i915, !enabled, - "PCH refclk assertion failure, should be active but is disabled\n"); + INTEL_DISPLAY_STATE_WARN(display, 
!enabled, + "PCH refclk assertion failure, should be active but is disabled\n"); } static void ibx_pch_dpll_enable(struct drm_i915_private *i915, @@ -2035,13 +2037,14 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *dpll_hw_state) { + struct intel_display *display = &i915->display; const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; u32 temp; - bxt_port_to_phy_channel(i915, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); /* Non-SSC reference */ intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); @@ -2157,6 +2160,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *dpll_hw_state) { + struct intel_display *display = &i915->display; struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_wakeref_t wakeref; @@ -2165,7 +2169,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, u32 val; bool ret; - bxt_port_to_phy_channel(i915, port, &phy, &ch); + bxt_port_to_phy_channel(display, port, &phy, &ch); wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); @@ -4619,6 +4623,7 @@ verify_single_dpll_state(struct drm_i915_private *i915, struct intel_crtc *crtc, const struct intel_crtc_state *new_crtc_state) { + struct intel_display *display = &i915->display; struct intel_dpll_hw_state dpll_hw_state = {}; u8 pipe_mask; bool active; @@ -4626,22 +4631,22 @@ verify_single_dpll_state(struct drm_i915_private *i915, active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state); if (!pll->info->always_on) { - I915_STATE_WARN(i915, !pll->on && pll->active_mask, - "%s: pll in active use but not on in sw tracking\n", - pll->info->name); - I915_STATE_WARN(i915, pll->on && !pll->active_mask, - "%s: pll is on but not used by any active pipe\n", - pll->info->name); - I915_STATE_WARN(i915, pll->on != active, - "%s: pll on state mismatch (expected %i, found %i)\n", - pll->info->name, pll->on, active); + INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask, + "%s: pll in active use but not on in sw tracking\n", + pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask, + "%s: pll is on but not used by any active pipe\n", + pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, pll->on != active, + "%s: pll on state mismatch (expected %i, found %i)\n", + pll->info->name, pll->on, active); } if (!crtc) { - I915_STATE_WARN(i915, - pll->active_mask & ~pll->state.pipe_mask, - "%s: more active pll users than references: 0x%x vs 0x%x\n", - pll->info->name, pll->active_mask, pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, + pll->active_mask & ~pll->state.pipe_mask, + "%s: more active pll users than references: 0x%x vs 0x%x\n", + pll->info->name, pll->active_mask, pll->state.pipe_mask); return; } @@ -4649,23 +4654,23 @@ verify_single_dpll_state(struct drm_i915_private *i915, pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) - I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask), - "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask), + "%s: pll active mismatch (expected pipe %c in active mask 
0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); else - I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask, + "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask), - "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", - pll->info->name, pipe_mask, pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask), + "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", + pll->info->name, pipe_mask, pll->state.pipe_mask); - I915_STATE_WARN(i915, - pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, - sizeof(dpll_hw_state)), - "%s: pll hw state mismatch\n", - pll->info->name); + INTEL_DISPLAY_STATE_WARN(display, + pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, + sizeof(dpll_hw_state)), + "%s: pll hw state mismatch\n", + pll->info->name); } static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll, @@ -4678,6 +4683,7 @@ static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll, void intel_shared_dpll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -4693,16 +4699,16 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state, u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; - I915_STATE_WARN(i915, pll->active_mask & pipe_mask, - "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", - pll->info->name, pipe_name(crtc->pipe), pll->active_mask); + INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask, + "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->active_mask); /* TC ports have both MG/TC and TBT PLL referenced simultaneously */ - I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll, - new_crtc_state->shared_dpll) && - pll->state.pipe_mask & pipe_mask, - "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", - pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask); + INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll, + new_crtc_state->shared_dpll) && + pll->state.pipe_mask & pipe_mask, + "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n", + pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 3a6d99044828..ce8c76e44e6a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -242,7 +242,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915) struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb) { - struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct drm_i915_private *i915 = to_i915(obj->dev); struct drm_i915_gem_object *dpt_obj; struct i915_address_space *vm; diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c 
b/drivers/gpu/drm/i915/display/intel_drrs.c index 3ca29afa5422..bb39eb96e812 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <linux/debugfs.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index da24e041d269..b7b44399adaa 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -4,6 +4,8 @@ * */ +#include <drm/drm_vblank.h> + #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" @@ -37,9 +39,16 @@ struct intel_dsb { unsigned int free_pos; /* - * ins_start_offset will help to store start dword of the dsb - * instuction and help in identifying the batch of auto-increment - * register. + * Previously emitted DSB instruction. Used to + * identify/adjust the instruction for indexed + * register writes. + */ + u32 ins[2]; + + /* + * Start of the previously emitted DSB instruction. + * Used to adjust the instruction for indexed + * register writes. */ unsigned int ins_start_offset; @@ -119,6 +128,12 @@ pre_commit_crtc_state(struct intel_atomic_state *state, return old_crtc_state; } +static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state) +{ + return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) - + intel_mode_vdisplay(&crtc_state->hw.adjusted_mode); +} + static int dsb_vtotal(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -215,9 +230,11 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw) dsb->free_pos = ALIGN(dsb->free_pos, 2); dsb->ins_start_offset = dsb->free_pos; + dsb->ins[0] = ldw; + dsb->ins[1] = udw; - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]); } static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, @@ -233,10 +250,8 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, if (dsb->free_pos == 0) return false; - prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK; - prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK; + prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK; + prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK; return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg); } @@ -269,8 +284,6 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { - u32 old_val; - /* * For example the buffer will look like below for 3 dwords for auto * increment register: @@ -299,23 +312,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, /* convert to indexed write? 
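 *
 * (editorial illustration, hedged, not part of the patch: with the
 * cached dsb->ins[] words, e.g. three back-to-back writes to an
 * auto-increment register such as
 *
 *	intel_dsb_reg_write(dsb, PREC_PAL_DATA(pipe), v0);
 *	intel_dsb_reg_write(dsb, PREC_PAL_DATA(pipe), v1);
 *	intel_dsb_reg_write(dsb, PREC_PAL_DATA(pipe), v2);
 *
 * collapse in the DSB buffer into a single indexed write
 *
 *	DW0: 3						count
 *	DW1: DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT | reg offset
 *	DW2: v0
 *	DW3: v1
 *	DW4: v2
 *	DW5: 0						pad to even length
 *
 * and caching ins[0]/ins[1] lets the count and opcode be adjusted
 * without reading them back from the write-combined DSB buffer)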
*/ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) { - u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf, - dsb->ins_start_offset + 0); + u32 prev_val = dsb->ins[0]; + + dsb->ins[0] = 1; /* count */ + dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | + i915_mmio_reg_offset(reg); - intel_dsb_buffer_write(&dsb->dsb_buf, - dsb->ins_start_offset + 0, 1); /* count */ + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0, + dsb->ins[0]); intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1, - (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | - i915_mmio_reg_offset(reg)); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val); + dsb->ins[1]); + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, + prev_val); dsb->free_pos++; } intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val); /* Update the count */ - old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset); - intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1); + dsb->ins[0]++; + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0, + dsb->ins[0]); /* if number of data words is odd, then the last dword should be 0.*/ if (dsb->free_pos & 0x1) @@ -370,6 +387,24 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb) intel_dsb_noop(dsb, 4); } +void intel_dsb_interrupt(struct intel_dsb *dsb) +{ + intel_dsb_emit(dsb, 0, + DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT); +} + +void intel_dsb_wait_usec(struct intel_dsb *dsb, int count) +{ + intel_dsb_emit(dsb, count, + DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT); +} + +void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count) +{ + intel_dsb_emit(dsb, count, + DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT); +} + static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb, u32 opcode, int lower, int upper) { @@ -510,6 +545,31 @@ static u32 dsb_error_int_en(struct intel_display *display) return errors; } +void intel_dsb_vblank_evade(struct intel_atomic_state *state, + struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + /* FIXME calibrate sensibly */ + int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20); + int vblank_delay = dsb_vblank_delay(crtc_state); + int start, end; + + if (pre_commit_is_vrr_active(state, crtc)) { + end = intel_vrr_vmin_vblank_start(crtc_state); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + + end = intel_vrr_vmax_vblank_start(crtc_state); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + } else { + end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode); + start = end - vblank_delay - latency; + intel_dsb_wait_scanline_out(state, dsb, start, end); + } +} + static void _intel_dsb_chain(struct intel_atomic_state *state, struct intel_dsb *dsb, struct intel_dsb *chained_dsb, @@ -535,7 +595,7 @@ static void _intel_dsb_chain(struct intel_atomic_state *state, intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id), dsb_error_int_status(display) | DSB_PROG_INT_STATUS | - dsb_error_int_en(display)); + dsb_error_int_en(display) | DSB_PROG_INT_EN); if (ctrl & DSB_WAIT_FOR_VBLANK) { int dewake_scanline = dsb_dewake_scanline_start(state, crtc); @@ -577,6 +637,17 @@ void intel_dsb_chain(struct intel_atomic_state *state, wait_for_vblank ? 
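/*
 * Editorial worked example for intel_dsb_vblank_evade() above (hedged;
 * the mode numbers are hypothetical): take a 1920x1080 mode with
 * crtc_clock 148500 kHz, htotal 2200, and a programmed vblank delay of
 * 4 lines (vblank_start 1084 vs. vdisplay 1080).  With
 * intel_usecs_to_scanlines() being roughly
 * DIV_ROUND_UP(usecs * crtc_clock, 1000 * htotal):
 *
 *	latency      = DIV_ROUND_UP(20 * 148500, 1000 * 2200) = 2 lines
 *	vblank_delay = 1084 - 1080                             = 4 lines
 *	window       = [1084 - 4 - 2, 1084]                    = [1078, 1084]
 *
 * i.e. the DSB busy-waits until the scanout position is outside the
 * last few lines leading into (and including) vblank start, so the
 * register writes that follow cannot straddle the frame boundary.
 */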
DSB_WAIT_FOR_VBLANK : 0); } +void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, + struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc); + int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode, + dsb_vblank_delay(crtc_state)) + 1; + + intel_dsb_wait_usec(dsb, usecs); +} + static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, int hw_dewake_scanline) { @@ -603,7 +674,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), dsb_error_int_status(display) | DSB_PROG_INT_STATUS | - dsb_error_int_en(display)); + dsb_error_int_en(display) | DSB_PROG_INT_EN); intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id), intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf)); @@ -671,6 +742,9 @@ void intel_dsb_wait(struct intel_dsb *dsb) /* Attempt to reset it */ dsb->free_pos = 0; dsb->ins_start_offset = 0; + dsb->ins[0] = 0; + dsb->ins[1] = 0; + intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0); intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id), @@ -706,10 +780,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, if (!i915->display.params.enable_dsb) return NULL; - /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */ - if (!IS_ENABLED(I915)) - return NULL; - dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; @@ -727,8 +797,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, dsb->id = dsb_id; dsb->crtc = crtc; dsb->size = size / 4; /* in dwords */ - dsb->free_pos = 0; - dsb->ins_start_offset = 0; dsb->chicken = dsb_chicken(state, crtc); dsb->hw_dewake_scanline = @@ -763,12 +831,29 @@ void intel_dsb_cleanup(struct intel_dsb *dsb) void intel_dsb_irq_handler(struct intel_display *display, enum pipe pipe, enum intel_dsb_id dsb_id) { - struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); u32 tmp, errors; tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id)); intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp); + if (tmp & DSB_PROG_INT_STATUS) { + spin_lock(&display->drm->event_lock); + + if (crtc->dsb_event) { + /* + * Update vblank counter/timestamp in case it + * hasn't been done yet for this frame.
+ */ + drm_crtc_accurate_vblank_count(&crtc->base); + + drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event); + crtc->dsb_event = NULL; + } + + spin_unlock(&display->drm->event_lock); + } + errors = tmp & dsb_error_int_status(display); if (errors) drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n", diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index c352c12aa59f..33e0fc2ab380 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -39,12 +39,19 @@ void intel_dsb_reg_write_masked(struct intel_dsb *dsb, void intel_dsb_noop(struct intel_dsb *dsb, int count); void intel_dsb_nonpost_start(struct intel_dsb *dsb); void intel_dsb_nonpost_end(struct intel_dsb *dsb); +void intel_dsb_interrupt(struct intel_dsb *dsb); +void intel_dsb_wait_usec(struct intel_dsb *dsb, int count); +void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count); +void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state, + struct intel_dsb *dsb); void intel_dsb_wait_scanline_in(struct intel_atomic_state *state, struct intel_dsb *dsb, int lower, int upper); void intel_dsb_wait_scanline_out(struct intel_atomic_state *state, struct intel_dsb *dsb, int lower, int upper); +void intel_dsb_vblank_evade(struct intel_atomic_state *state, + struct intel_dsb *dsb); void intel_dsb_chain(struct intel_atomic_state *state, struct intel_dsb *dsb, struct intel_dsb *chained_dsb, diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index bd5888ce4852..0be46c6c9611 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -76,7 +76,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, if (fixed_mode->clock > max_dotclk) return MODE_CLOCK_HIGH; - return intel_mode_valid_max_plane_size(dev_priv, mode, false); + return intel_mode_valid_max_plane_size(dev_priv, mode, 1); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f0e3be0fe420..e8129a720210 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -323,6 +323,7 @@ enum { static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, int gpio, bool value) { + struct intel_display *display = &dev_priv->display; int index; if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) @@ -367,7 +368,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_AVEE_EN_2: index = gpio == MIPI_AVEE_EN_1 ? 1 : 2; - intel_de_rmw(dev_priv, GPIO(dev_priv, index), + intel_de_rmw(display, GPIO(display, index), GPIO_CLOCK_VAL_OUT, GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0)); @@ -376,7 +377,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_VIO_EN_2: index = gpio == MIPI_VIO_EN_1 ? 1 : 2; - intel_de_rmw(dev_priv, GPIO(dev_priv, index), + intel_de_rmw(display, GPIO(display, index), GPIO_DATA_VAL_OUT, GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | GPIO_DATA_VAL_MASK | (value ? 
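/*
 * Editorial usage sketch for the new DSB interrupt plumbing above
 * (hedged; a hypothetical commit-path tail, not taken from the patch):
 * a caller finishing a DSB program can wait out the vblank delay and
 * then raise the "program done" interrupt, which the handler in
 * intel_dsb.c turns into the pending crtc->dsb_event vblank event.
 */
static void queue_dsb_done_event(struct intel_atomic_state *state,
				 struct intel_dsb *dsb)
{
	intel_dsb_wait_vblank_delay(state, dsb);	/* frame actually scanning out */
	intel_dsb_interrupt(dsb);			/* raises DSB_PROG_INT_STATUS */
}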
GPIO_DATA_VAL_OUT : 0)); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 12e7628cbecf..2d5ffb37eac9 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -31,6 +31,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -416,6 +417,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, struct intel_dvo *intel_dvo, const struct intel_dvo_device *dvo) { + struct intel_display *display = &dev_priv->display; struct i2c_adapter *i2c; u32 dpll[I915_MAX_PIPES]; enum pipe pipe; @@ -427,7 +429,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * special cases, but otherwise default to what's defined * in the spec. */ - if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio)) + if (intel_gmbus_is_valid_pin(display, dvo->gpio)) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GMBUS_PIN_SSC; @@ -439,7 +441,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - i2c = intel_gmbus_get_adapter(dev_priv, gpio); + i2c = intel_gmbus_get_adapter(display, gpio); intel_dvo->dev = *dvo; @@ -488,6 +490,7 @@ static bool intel_dvo_probe(struct drm_i915_private *i915, void intel_dvo_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_connector *connector; struct intel_encoder *encoder; struct intel_dvo *intel_dvo; @@ -548,7 +551,7 @@ void intel_dvo_init(struct drm_i915_private *i915) drm_connector_init_with_ddc(&i915->drm, &connector->base, &intel_dvo_connector_funcs, intel_dvo_connector_type(&intel_dvo->dev), - intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC)); + intel_gmbus_get_adapter(display, GMBUS_PIN_DPC)); drm_connector_helper_add(&connector->base, &intel_dvo_connector_helper_funcs); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 35557d98d7a7..6a7060889f40 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -3,15 +3,16 @@ * Copyright © 2021 Intel Corporation */ -#include <drm/drm_blend.h> -#include <drm/drm_modeset_helper.h> - #include <linux/dma-fence.h> #include <linux/dma-resv.h> -#include "gem/i915_gem_object.h" +#include <drm/drm_blend.h> +#include <drm/drm_gem.h> +#include <drm/drm_modeset_helper.h> + #include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpt.h" @@ -44,6 +45,14 @@ static const struct drm_format_info skl_ccs_formats[] = { .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, }; /* @@ -66,6 +75,30 @@ static const struct drm_format_info 
gen12_ccs_formats[] = { { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_YUYV, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, @@ -101,31 +134,79 @@ static const struct drm_format_info gen12_ccs_formats[] = { */ static const struct drm_format_info gen12_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, 
.num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 3, + .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; static const struct drm_format_info gen12_flat_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2, + .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; @@ -268,7 +349,7 @@ static const struct intel_modifier_desc intel_modifiers[] = { .plane_caps = INTEL_PLANE_CAP_TILING_Y, }, { .modifier = I915_FORMAT_MOD_X_TILED, - .display_ver = DISPLAY_VER_ALL, + .display_ver = { 0, 
29 }, .plane_caps = INTEL_PLANE_CAP_TILING_X, }, { .modifier = DRM_FORMAT_MOD_LINEAR, @@ -1237,7 +1318,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane, int plane_width, int *x, int *y) { - struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); int ret; ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane); @@ -1261,7 +1342,7 @@ static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int co * fb layout agrees with the fence layout. We already check that the * fb stride matches the fence stride elsewhere. */ - if (color_plane == 0 && i915_gem_object_is_tiled(obj) && + if (color_plane == 0 && intel_bo_is_tiled(obj) && (*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) { drm_dbg_kms(fb->base.dev, "bad fb plane %d offset: 0x%x\n", @@ -1581,7 +1662,7 @@ static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb) int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb) { - struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); u32 gtt_offset_rotated = 0; u32 gtt_offset_remapped = 0; unsigned int max_size = 0; @@ -1654,10 +1735,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * max_size = max(max_size, offset + size); } - if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) { + if (mul_u32_u32(max_size, tile_size) > obj->size) { drm_dbg_kms(&i915->drm, "fb too big for bo (need %llu bytes, have %zu bytes)\n", - mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size); + mul_u32_u32(max_size, tile_size), obj->size); return -EINVAL; } @@ -1881,7 +1962,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) intel_frontbuffer_put(intel_fb->frontbuffer); - intel_fb_bo_framebuffer_fini(intel_fb_obj(fb)); + intel_fb_bo_framebuffer_fini(intel_fb_bo(fb)); kfree(intel_fb); } @@ -1890,16 +1971,16 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev); + struct drm_gem_object *obj = intel_fb_bo(fb); + struct intel_display *display = to_intel_display(obj->dev); - if (i915_gem_object_is_userptr(obj)) { - drm_dbg(&i915->drm, + if (intel_bo_is_userptr(obj)) { + drm_dbg(display->drm, "attempting to use a userptr for a framebuffer, denied\n"); return -EINVAL; } - return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle); + return drm_gem_handle_create(file, obj, handle); } struct frontbuffer_fence_cb { @@ -1923,7 +2004,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_clip_rect *clips, unsigned int num_clips) { - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); struct intel_frontbuffer *front = to_intel_frontbuffer(fb); struct dma_fence *fence; struct frontbuffer_fence_cb *cb; @@ -1932,10 +2013,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, if (!atomic_read(&front->bits)) return 0; - if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false))) + if (dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(false))) goto flush; - ret = 
dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false), + ret = dma_resv_get_singleton(obj->resv, dma_resv_usage_rw(false), &fence); if (ret || !fence) goto flush; @@ -1962,7 +2043,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, return ret; flush: - i915_gem_object_flush_if_display(obj); + intel_bo_flush_if_display(obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); return ret; } @@ -1974,10 +2055,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { }; int intel_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev); + struct drm_i915_private *dev_priv = to_i915(obj->dev); struct drm_framebuffer *fb = &intel_fb->base; u32 max_stride; int ret = -EINVAL; @@ -2053,7 +2134,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } } - fb->obj[i] = intel_bo_to_drm_bo(obj); + fb->obj[i] = obj; } ret = intel_fill_fb_info(dev_priv, intel_fb); @@ -2097,7 +2178,7 @@ intel_user_framebuffer_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *user_mode_cmd) { struct drm_framebuffer *fb; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; struct drm_i915_private *i915 = to_i915(dev); @@ -2106,13 +2187,13 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_CAST(obj); fb = intel_framebuffer_create(obj, &mode_cmd); - drm_gem_object_put(intel_bo_to_drm_bo(obj)); + drm_gem_object_put(obj); return fb; } struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, +intel_framebuffer_create(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct intel_framebuffer *intel_fb; @@ -2132,3 +2213,8 @@ err: kfree(intel_fb); return ERR_PTR(ret); } + +struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb) +{ + return fb ? 
fb->obj[0] : NULL; +} diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 827be3f7934c..d78993e5eb62 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -12,6 +12,7 @@ struct drm_device; struct drm_file; struct drm_framebuffer; +struct drm_gem_object; struct drm_i915_gem_object; struct drm_i915_private; struct drm_mode_fb_cmd2; @@ -85,9 +86,12 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio int intel_plane_compute_gtt(struct intel_plane_state *plane_state); int intel_framebuffer_init(struct intel_framebuffer *ifb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * +intel_framebuffer_create(struct drm_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); @@ -97,4 +101,6 @@ bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier); +struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb); + #endif /* __INTEL_FB_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c index 4be09541e509..810ca6ff8640 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.c +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c @@ -11,15 +11,16 @@ #include "intel_fb.h" #include "intel_fb_bo.h" -void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj) +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) { /* Nothing to do for i915 */ } int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *_obj, struct drm_mode_fb_cmd2 *mode_cmd) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned int tiling, stride; @@ -74,7 +75,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, return 0; } -struct drm_i915_gem_object * +struct drm_gem_object * intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, struct drm_file *filp, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -93,5 +94,5 @@ intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, return ERR_PTR(-EREMOTE); } - return obj; + return intel_bo_to_drm_bo(obj); } diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h index 232bf898b013..e71acd1bcb24 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_bo.h +++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h @@ -7,18 +7,18 @@ #define __INTEL_FB_BO_H__ struct drm_file; -struct drm_mode_fb_cmd2; -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; +struct drm_mode_fb_cmd2; struct intel_framebuffer; -void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj); +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj); int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -struct drm_i915_gem_object * +struct drm_gem_object * intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 575b271e012b..d3a86f9c6bc8 
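The common thread in the intel_fb / intel_fb_bo hunks above: display-facing interfaces now pass struct drm_gem_object, and the i915-specific type is recovered only where driver state is actually touched, via to_intel_bo(), with intel_bo_to_drm_bo() as the inverse. A minimal sketch of the resulting call shape, using only helpers visible in this diff (the fb_obj_is_tiled() wrapper is hypothetical):

#include <drm/drm_gem.h>

/* Hypothetical caller, for illustration only. */
static bool fb_obj_is_tiled(const struct drm_framebuffer *fb)
{
	/* new accessor: returns fb->obj[0], or NULL for a NULL fb */
	struct drm_gem_object *obj = intel_fb_bo(fb);

	/* intel_bo_*() wrappers keep display code free of drm_i915_gem_object */
	return obj && intel_bo_is_tiled(obj);
}

Where no wrapper exists yet, code drops to the driver type explicitly, as intel_fb_bo_framebuffer_init() does above with its local to_intel_bo(_obj).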
100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -26,7 +26,8 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb, { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *_obj = intel_fb_bo(fb); + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; struct i915_vma *vma; int ret; @@ -111,7 +112,8 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *_obj = intel_fb_bo(fb); + struct drm_i915_gem_object *obj = to_intel_bo(_obj); intel_wakeref_t wakeref; struct i915_gem_ww_ctx ww; struct i915_vma *vma; @@ -274,9 +276,11 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state) * will trigger might_sleep() even if it won't actually sleep, * which is the case when the fb has already been pinned. */ - if (intel_plane_needs_physical(plane)) - plane_state->phys_dma_addr = - i915_gem_object_get_dma_address(intel_fb_obj(&fb->base), 0); + if (intel_plane_needs_physical(plane)) { + struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); + + plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0); + } } else { unsigned int alignment = intel_plane_fb_min_alignment(plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 52b79bacef4d..df05904bac8a 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -38,6 +38,7 @@ * forcibly disable it to allow proper screen updates. 
*/ +#include <linux/debugfs.h> #include <linux/string_helpers.h> #include <drm/drm_blend.h> @@ -1346,7 +1347,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, /* Wa_14016291713 */ if ((IS_DISPLAY_VER(display, 12, 13) || - IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) && + IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) && crtc_state->has_psr && !crtc_state->has_panel_replay) { plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; return 0; @@ -1792,7 +1793,6 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) { struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); mutex_lock(&fbc->lock); @@ -1805,7 +1805,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) intel_fbc_deactivate(fbc, "FIFO underrun"); if (!fbc->flip_pending) - intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, fbc->state.plane->pipe)); __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 49a1ac4f5491..00852ff5b247 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -41,12 +41,11 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> -#include "gem/i915_gem_mman.h" -#include "gem/i915_gem_object.h" - #include "i915_drv.h" +#include "intel_bo.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" @@ -129,10 +128,9 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct intel_fbdev *fbdev = to_intel_fbdev(info->par); - struct drm_gem_object *bo = drm_gem_fb_get_obj(&fbdev->fb->base, 0); - struct drm_i915_gem_object *obj = to_intel_bo(bo); + struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0); - return i915_gem_fb_mmap(obj, vma); + return intel_bo_fb_mmap(obj, vma); } static void intel_fbdev_fb_destroy(struct fb_info *info) @@ -187,7 +185,7 @@ static int intelfb_create(struct drm_fb_helper *helper, struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; int ret; mutex_lock(&ifbdev->hpd_lock); @@ -209,7 +207,7 @@ static int intelfb_create(struct drm_fb_helper *helper, drm_framebuffer_put(&fb->base); fb = NULL; } - if (!fb || drm_WARN_ON(dev, !intel_fb_obj(&fb->base))) { + if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) { drm_dbg_kms(&dev_priv->drm, "no BIOS fb, allocating a new one\n"); fb = intel_fbdev_fb_alloc(helper, sizes); @@ -247,7 +245,7 @@ static int intelfb_create(struct drm_fb_helper *helper, info->fbops = &intelfb_ops; - obj = intel_fb_obj(&fb->base); + obj = intel_fb_bo(&fb->base); ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma); if (ret) @@ -259,7 +257,7 @@ static int intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. 
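Two details in the hunks above: the Wa_14016291713 check in intel_fbc.c moves from IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), ...) to IS_DISPLAY_VERx100_STEP(i915, 1400, ...), which encodes display version 14.0 as the integer 1400; and intel_fbdev.c replaces the gem/ headers with <drm/drm_gem.h> plus intel_bo.h, its mmap hook resolving the backing object through the core drm_gem_fb_get_obj() helper. That helper amounts to roughly the following (a simplified sketch of the core implementation; gem_fb_obj() is a hypothetical stand-in):

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>

/* Sketch of what drm_gem_fb_get_obj(fb, plane) boils down to. */
static struct drm_gem_object *gem_fb_obj(struct drm_framebuffer *fb,
					 unsigned int plane)
{
	if (plane >= ARRAY_SIZE(fb->obj))
		return NULL;

	return fb->obj[plane];
}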
*/ - if (!i915_gem_object_is_shmem(obj) && !prealloc) + if (!intel_bo_is_shmem(obj) && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -323,8 +321,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - struct drm_i915_gem_object *obj = - intel_fb_obj(plane_state->uapi.fb); + struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb); if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, @@ -340,12 +337,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, continue; } - if (intel_bo_to_drm_bo(obj)->size > max_size) { + if (obj->size > max_size) { drm_dbg_kms(&i915->drm, "found possible fb from [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); fb = to_intel_framebuffer(plane_state->uapi.fb); - max_size = intel_bo_to_drm_bo(obj)->size; + max_size = obj->size; } } @@ -533,7 +530,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * full of whatever garbage was left in there. */ if (state == FBINFO_STATE_RUNNING && - !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base))) + !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base))) memset_io(info->screen_base, 0, info->screen_size); drm_fb_helper_set_suspend(&ifbdev->helper, state); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 497525ef9668..4991c35a2632 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -9,6 +9,7 @@ #include "i915_drv.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fbdev_fb.h" struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, @@ -60,15 +61,16 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, return ERR_PTR(-ENOMEM); } - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd); i915_gem_object_put(obj); return to_intel_framebuffer(fb); } int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma) + struct drm_gem_object *_obj, struct i915_vma *vma) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; void __iomem *vaddr; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h index 4832fe688fbf..e502ae375fc0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h @@ -8,7 +8,7 @@ struct drm_fb_helper; struct drm_fb_helper_surface_size; -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; struct fb_info; struct i915_vma; @@ -16,6 +16,6 @@ struct i915_vma; struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma); + struct drm_gem_object *obj, struct i915_vma *vma); #endif diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 222cd0e1a2bc..98e1a3606227 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -26,9 +26,10 @@ struct intel_fdi_funcs { static void 
assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - if (HAS_DDI(dev_priv)) { + if (HAS_DDI(display)) { /* * DDI does not have a specific FDI_TX register. * @@ -36,14 +37,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, * so pipe->transcoder cast is fine here. */ enum transcoder cpu_transcoder = (enum transcoder)pipe; - cur_state = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; + cur_state = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; } else { - cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; + cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; } - I915_STATE_WARN(dev_priv, cur_state != state, - "FDI TX state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI TX state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe) @@ -59,12 +60,13 @@ void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe) static void assert_fdi_rx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { + struct intel_display *display = &dev_priv->display; bool cur_state; - cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; - I915_STATE_WARN(dev_priv, cur_state != state, - "FDI RX state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI RX state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe) @@ -80,6 +82,7 @@ void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe) void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) { + struct intel_display *display = &i915->display; bool cur_state; /* ILK FDI PLL is always enabled */ @@ -87,23 +90,24 @@ void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ - if (HAS_DDI(i915)) + if (HAS_DDI(display)) return; - cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; - I915_STATE_WARN(i915, !cur_state, - "FDI TX PLL assertion failure, should be active but is disabled\n"); + cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, !cur_state, + "FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll(struct drm_i915_private *i915, enum pipe pipe, bool state) { + struct intel_display *display = &i915->display; bool cur_state; - cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; - I915_STATE_WARN(i915, cur_state != state, - "FDI RX PLL assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "FDI RX PLL assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, 
enum pipe pipe) @@ -137,6 +141,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc, */ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state; const struct intel_crtc_state *new_crtc_state; @@ -145,7 +150,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3) return 0; - crtc = intel_crtc_for_pipe(i915, PIPE_C); + crtc = intel_crtc_for_pipe(display, PIPE_C); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state) return 0; @@ -157,7 +162,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state) if (!old_crtc_state->fdi_lanes) return 0; - crtc = intel_crtc_for_pipe(i915, PIPE_B); + crtc = intel_crtc_for_pipe(display, PIPE_B); new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(new_crtc_state)) return PTR_ERR(new_crtc_state); @@ -184,6 +189,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config, enum pipe *pipe_to_reduce) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->uapi.state; struct intel_crtc *other_crtc; @@ -223,7 +229,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if (pipe_config->fdi_lanes <= 2) return 0; - other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); + other_crtc = intel_crtc_for_pipe(display, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -244,7 +250,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); + other_crtc = intel_crtc_for_pipe(display, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 8949fbb1cc60..cda1daf4cdea 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -57,6 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; enum pipe pipe; @@ -64,7 +65,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; @@ -75,6 +76,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) static bool cpt_can_enable_serr_int(struct drm_device *dev) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; @@ -82,7 +84,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(display, pipe); if (crtc->pch_fifo_underrun_disabled) return false; @@ -93,6 +95,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) static void i9xx_check_fifo_underruns(struct 
intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = PIPESTAT(dev_priv, crtc->pipe); u32 enable_mask; @@ -106,7 +109,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); intel_de_posting_read(dev_priv, reg); - trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); + trace_intel_cpu_fifo_underrun(display, crtc->pipe); drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe)); } @@ -147,6 +150,7 @@ static void ilk_set_fifo_underrun_reporting(struct drm_device *dev, static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT); @@ -159,7 +163,7 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc) intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); intel_de_posting_read(dev_priv, GEN7_ERR_INT); - trace_intel_cpu_fifo_underrun(dev_priv, pipe); + trace_intel_cpu_fifo_underrun(display, pipe); drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe)); } @@ -188,35 +192,15 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, } } -static u32 -icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv) -{ - u32 mask = PIPE_STATUS_UNDERRUN; - - if (DISPLAY_VER(dev_priv) >= 13) - mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD | - PIPE_STATUS_HARD_UNDERRUN_XELPD | - PIPE_STATUS_PORT_UNDERRUN_XELPD; - - return mask; -} - static void bdw_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - u32 mask = gen8_de_pipe_underrun_mask(dev_priv); - if (enable) { - if (DISPLAY_VER(dev_priv) >= 11) - intel_de_write(dev_priv, - ICL_PIPESTATUS(dev_priv, pipe), - icl_pipe_status_underrun_mask(dev_priv)); - - bdw_enable_pipe_irq(dev_priv, pipe, mask); - } else { - bdw_disable_pipe_irq(dev_priv, pipe, mask); - } + if (enable) + bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + else + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, @@ -235,6 +219,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; u32 serr_int = intel_de_read(dev_priv, SERR_INT); @@ -248,7 +233,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); intel_de_posting_read(dev_priv, SERR_INT); - trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); + trace_intel_pch_fifo_underrun(display, pch_transcoder); drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n", pipe_name(pch_transcoder)); } @@ -282,8 +267,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 
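The repeated shape in these underrun hunks: intel_crtc_for_pipe() and the trace_intel_*_fifo_underrun() tracepoints now take struct intel_display, and call sites derive it with the polymorphic to_intel_display() from whatever handle they already hold (a drm_device, crtc or atomic state, as seen above). A minimal sketch of the converted lookup (lookup_crtc() is hypothetical):

/* Hypothetical illustration of the conversion pattern above. */
static struct intel_crtc *lookup_crtc(struct drm_device *dev, enum pipe pipe)
{
	struct intel_display *display = to_intel_display(dev);

	return intel_crtc_for_pipe(display, pipe);
}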
bool old; lockdep_assert_held(&dev_priv->irq_lock); @@ -351,8 +337,9 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pch_transcoder, bool enable) { + struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, pch_transcoder); + intel_crtc_for_pipe(display, pch_transcoder); unsigned long flags; bool old; @@ -395,8 +382,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - u32 underruns = 0; + struct intel_display *display = &dev_priv->display; + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); /* We may be called too early in init, thanks BIOS! */ if (crtc == NULL) @@ -407,37 +394,10 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, crtc->cpu_fifo_underrun_disabled) return; - /* - * Starting with display version 11, the PIPE_STAT register records - * whether an underrun has happened, and on XELPD+, it will also record - * whether the underrun was soft/hard and whether it was triggered by - * the downstream port logic. We should clear these bits (which use - * write-1-to-clear logic) too. - * - * Note that although the IIR gives us the same underrun and soft/hard - * information, PIPE_STAT is the only place we can find out whether - * the underrun was caused by the downstream port. - */ - if (DISPLAY_VER(dev_priv) >= 11) { - underruns = intel_de_read(dev_priv, - ICL_PIPESTATUS(dev_priv, pipe)) & - icl_pipe_status_underrun_mask(dev_priv); - intel_de_write(dev_priv, ICL_PIPESTATUS(dev_priv, pipe), - underruns); - } - if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) { - trace_intel_cpu_fifo_underrun(dev_priv, pipe); - - if (DISPLAY_VER(dev_priv) >= 11) - drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n", - pipe_name(pipe), - underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "", - underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "", - underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "", - underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : ""); - else - drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); + trace_intel_cpu_fifo_underrun(display, pipe); + + drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } intel_fbc_handle_fifo_underrun_irq(&dev_priv->display); @@ -455,9 +415,11 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pch_transcoder) { + struct intel_display *display = &dev_priv->display; + if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) { - trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); + trace_intel_pch_fifo_underrun(display, pch_transcoder); drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n", pipe_name(pch_transcoder)); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index af4576dee92a..6ed5f726ee60 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -55,9 +55,11 @@ * cancelled as soon as busyness is detected. 
*/ -#include "gem/i915_gem_object_frontbuffer.h" +#include <drm/drm_gem.h> + #include "i915_active.h" #include "i915_drv.h" +#include "intel_bo.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dp.h" @@ -93,7 +95,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915, if (!frontbuffer_bits) return; - trace_intel_frontbuffer_flush(i915, frontbuffer_bits, origin); + trace_intel_frontbuffer_flush(display, frontbuffer_bits, origin); might_sleep(); intel_td_flush(i915); @@ -173,17 +175,17 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); - struct intel_display *display = &i915->display; + struct intel_display *display = to_intel_display(front->obj->dev); + struct drm_i915_private *i915 = to_i915(display->drm); if (origin == ORIGIN_CS) { - spin_lock(&i915->display.fb_tracking.lock); - i915->display.fb_tracking.busy_bits |= frontbuffer_bits; - i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->display.fb_tracking.lock); + spin_lock(&display->fb_tracking.lock); + display->fb_tracking.busy_bits |= frontbuffer_bits; + display->fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&display->fb_tracking.lock); } - trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin); + trace_intel_frontbuffer_invalidate(display, frontbuffer_bits, origin); might_sleep(); intel_psr_invalidate(display, frontbuffer_bits, origin); @@ -195,14 +197,15 @@ void __intel_fb_flush(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { - struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); + struct intel_display *display = to_intel_display(front->obj->dev); + struct drm_i915_private *i915 = to_i915(display->drm); if (origin == ORIGIN_CS) { - spin_lock(&i915->display.fb_tracking.lock); + spin_lock(&display->fb_tracking.lock); /* Filter out new bits since rendering started. 
*/ - frontbuffer_bits &= i915->display.fb_tracking.busy_bits; - i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->display.fb_tracking.lock); + frontbuffer_bits &= display->fb_tracking.busy_bits; + display->fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&display->fb_tracking.lock); } if (frontbuffer_bits) @@ -214,7 +217,7 @@ static void intel_frontbuffer_flush_work(struct work_struct *work) struct intel_frontbuffer *front = container_of(work, struct intel_frontbuffer, flush_work); - i915_gem_object_flush_if_display(front->obj); + intel_bo_flush_if_display(front->obj); intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); intel_frontbuffer_put(front); } @@ -255,31 +258,32 @@ static void frontbuffer_retire(struct i915_active *ref) } static void frontbuffer_release(struct kref *ref) - __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock) + __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock) { struct intel_frontbuffer *ret, *front = container_of(ref, typeof(*front), ref); - struct drm_i915_gem_object *obj = front->obj; + struct drm_gem_object *obj = front->obj; + struct intel_display *display = to_intel_display(obj->dev); - drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits)); + drm_WARN_ON(display->drm, atomic_read(&front->bits)); - i915_ggtt_clear_scanout(obj); + i915_ggtt_clear_scanout(to_intel_bo(obj)); - ret = i915_gem_object_set_frontbuffer(obj, NULL); - drm_WARN_ON(&intel_bo_to_i915(obj)->drm, ret); - spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock); + ret = intel_bo_set_frontbuffer(obj, NULL); + drm_WARN_ON(display->drm, ret); + spin_unlock(&display->fb_tracking.lock); i915_active_fini(&front->write); kfree_rcu(front, rcu); } struct intel_frontbuffer * -intel_frontbuffer_get(struct drm_i915_gem_object *obj) +intel_frontbuffer_get(struct drm_gem_object *obj) { - struct drm_i915_private *i915 = intel_bo_to_i915(obj); + struct drm_i915_private *i915 = to_i915(obj->dev); struct intel_frontbuffer *front, *cur; - front = i915_gem_object_get_frontbuffer(obj); + front = intel_bo_get_frontbuffer(obj); if (front) return front; @@ -297,7 +301,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work); spin_lock(&i915->display.fb_tracking.lock); - cur = i915_gem_object_set_frontbuffer(obj, front); + cur = intel_bo_set_frontbuffer(obj, front); spin_unlock(&i915->display.fb_tracking.lock); if (cur != front) kfree(front); @@ -308,7 +312,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, - &intel_bo_to_i915(front->obj)->display.fb_tracking.lock); + &to_intel_display(front->obj->dev)->fb_tracking.lock); } /** @@ -337,13 +341,17 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { - drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm, + struct intel_display *display = to_intel_display(old->obj->dev); + + drm_WARN_ON(display->drm, !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { - drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm, + struct intel_display *display = to_intel_display(new->obj->dev); + + drm_WARN_ON(display->drm, atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index abb51e8bb920..6237780a9f68 100644 --- 
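With front->obj now a plain struct drm_gem_object, frontbuffer lookup and teardown go through the intel_bo_get_frontbuffer()/intel_bo_set_frontbuffer() wrappers under fb_tracking.lock, and a lost intel_frontbuffer_get() race simply frees the loser's allocation, as above. A minimal caller-side sketch (track_fb() is hypothetical; assumes intel_frontbuffer_get() returns NULL on allocation failure):

/* Hypothetical caller taking frontbuffer tracking on an fb's object. */
static struct intel_frontbuffer *track_fb(struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = intel_fb_bo(fb);

	if (!obj)
		return NULL;

	/* reference is dropped later with intel_frontbuffer_put() */
	return intel_frontbuffer_get(obj);
}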
a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -30,6 +30,7 @@ #include "i915_active_types.h" +struct drm_gem_object; struct drm_i915_private; enum fb_op_origin { @@ -44,7 +45,7 @@ struct intel_frontbuffer { struct kref ref; atomic_t bits; struct i915_active write; - struct drm_i915_gem_object *obj; + struct drm_gem_object *obj; struct rcu_head rcu; struct work_struct flush_work; @@ -77,7 +78,7 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915, void intel_frontbuffer_put(struct intel_frontbuffer *front); struct intel_frontbuffer * -intel_frontbuffer_get(struct drm_i915_gem_object *obj); +intel_frontbuffer_get(struct drm_gem_object *obj); void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 6470f75106bd..e3d938c7f83e 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -48,7 +48,7 @@ struct intel_gmbus { u32 reg0; i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; - struct drm_i915_private *i915; + struct intel_display *display; }; enum gmbus_gpio { @@ -149,9 +149,10 @@ static const struct gmbus_pin gmbus_pins_mtp[] = { [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, }; -static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, +static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display, unsigned int pin) { + struct drm_i915_private *i915 = to_i915(display->drm); const struct gmbus_pin *pins; size_t size; @@ -173,7 +174,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { pins = gmbus_pins_bxt; size = ARRAY_SIZE(gmbus_pins_bxt); - } else if (DISPLAY_VER(i915) == 9) { + } else if (DISPLAY_VER(display) == 9) { pins = gmbus_pins_skl; size = ARRAY_SIZE(gmbus_pins_skl); } else if (IS_BROADWELL(i915)) { @@ -190,9 +191,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, return &pins[pin]; } -bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin) +bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin) { - return get_gmbus_pin(i915, pin); + return get_gmbus_pin(display, pin); } /* Intel GPIO access functions */ @@ -206,42 +207,45 @@ to_intel_gmbus(struct i2c_adapter *i2c) } void -intel_gmbus_reset(struct drm_i915_private *i915) +intel_gmbus_reset(struct intel_display *display) { - intel_de_write(i915, GMBUS0(i915), 0); - intel_de_write(i915, GMBUS4(i915), 0); + intel_de_write(display, GMBUS0(display), 0); + intel_de_write(display, GMBUS4(display), 0); } -static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, +static void pnv_gmbus_clock_gating(struct intel_display *display, bool enable) { /* When using bit bashing for I2C, this bit needs to be set to 1 */ - intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, + intel_de_rmw(display, DSPCLK_GATE_D(display), + PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } -static void pch_gmbus_clock_gating(struct drm_i915_private *i915, +static void pch_gmbus_clock_gating(struct intel_display *display, bool enable) { - intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, + intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, + PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? 
PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } -static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, +static void bxt_gmbus_clock_gating(struct intel_display *display, bool enable) { - intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, + intel_de_rmw(display, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, !enable ? BXT_GMBUS_GATING_DIS : 0); } static u32 get_reserved(struct intel_gmbus *bus) { - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ if (!IS_I830(i915) && !IS_I845G(i915)) - reserved = intel_de_read_notrace(i915, bus->gpio_reg) & + reserved = intel_de_read_notrace(display, bus->gpio_reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); return reserved; @@ -250,31 +254,31 @@ static u32 get_reserved(struct intel_gmbus *bus) static int get_clock(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); - intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); - intel_de_write_notrace(i915, bus->gpio_reg, reserved); + intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); + intel_de_write_notrace(display, bus->gpio_reg, reserved); - return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; + return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); - intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); - intel_de_write_notrace(i915, bus->gpio_reg, reserved); + intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); + intel_de_write_notrace(display, bus->gpio_reg, reserved); - return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; + return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); u32 clock_bits; @@ -284,14 +288,14 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits); - intel_de_posting_read(i915, bus->gpio_reg); + intel_de_write_notrace(display, bus->gpio_reg, reserved | clock_bits); + intel_de_posting_read(display, bus->gpio_reg); } static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; u32 reserved = get_reserved(bus); u32 data_bits; @@ -301,20 +305,21 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits); - intel_de_posting_read(i915, bus->gpio_reg); + intel_de_write_notrace(display, bus->gpio_reg, reserved | data_bits); + intel_de_posting_read(display, bus->gpio_reg); } static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - 
struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); - intel_gmbus_reset(i915); + intel_gmbus_reset(display); if (IS_PINEVIEW(i915)) - pnv_gmbus_clock_gating(i915, false); + pnv_gmbus_clock_gating(display, false); set_data(bus, 1); set_clock(bus, 1); @@ -326,13 +331,14 @@ static void intel_gpio_post_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); set_data(bus, 1); set_clock(bus, 1); if (IS_PINEVIEW(i915)) - pnv_gmbus_clock_gating(i915, true); + pnv_gmbus_clock_gating(display, true); } static void @@ -355,16 +361,17 @@ intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg) algo->data = bus; } -static bool has_gmbus_irq(struct drm_i915_private *i915) +static bool has_gmbus_irq(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); /* * encoder->shutdown() may want to use GMBUS * after irqs have already been disabled. */ - return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); + return HAS_GMBUS_IRQ(display) && intel_irqs_enabled(i915); } -static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) +static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en) { DEFINE_WAIT(wait); u32 gmbus2; @@ -374,21 +381,21 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ - if (!has_gmbus_irq(i915)) + if (!has_gmbus_irq(display)) irq_en = 0; - add_wait_queue(&i915->display.gmbus.wait_queue, &wait); - intel_de_write_fw(i915, GMBUS4(i915), irq_en); + add_wait_queue(&display->gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, + ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, 2); if (ret) - ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, + ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, 50); - intel_de_write_fw(i915, GMBUS4(i915), 0); - remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), 0); + remove_wait_queue(&display->gmbus.wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) return -ENXIO; @@ -397,7 +404,7 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) } static int -gmbus_wait_idle(struct drm_i915_private *i915) +gmbus_wait_idle(struct intel_display *display) { DEFINE_WAIT(wait); u32 irq_enable; @@ -405,33 +412,33 @@ gmbus_wait_idle(struct drm_i915_private *i915) /* Important: The hw handles only the first bit, so set only one! 
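gmbus_wait() above encodes the whole GMBUS completion contract: GMBUS_SATOER is always folded into the status mask so a target NAK surfaces as -ENXIO, a 2 us wait_for_us() spin covers the common fast case before the sleeping 50 ms wait_for() fallback, and GMBUS4 interrupt enables are armed only while has_gmbus_irq() says IRQs are usable. A caller-side sketch of that contract (the -ETIMEDOUT case is inferred from the wait_for() tail, not shown in this hunk):

int ret;

/*
 * Returns 0 once GMBUS_HW_RDY is observed, -ENXIO if GMBUS_SATOER
 * (target NAK) is seen first, and otherwise the -ETIMEDOUT from the
 * 50 ms sleeping fallback.
 */
ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);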
*/ irq_enable = 0; - if (has_gmbus_irq(i915)) + if (has_gmbus_irq(display)) irq_enable = GMBUS_IDLE_EN; - add_wait_queue(&i915->display.gmbus.wait_queue, &wait); - intel_de_write_fw(i915, GMBUS4(i915), irq_enable); + add_wait_queue(&display->gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), irq_enable); - ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10); + ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10); - intel_de_write_fw(i915, GMBUS4(i915), 0); - remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(display, GMBUS4(display), 0); + remove_wait_queue(&display->gmbus.wait_queue, &wait); return ret; } -static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915) +static unsigned int gmbus_max_xfer_size(struct intel_display *display) { - return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : + return DISPLAY_VER(display) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; } static int -gmbus_xfer_read_chunk(struct drm_i915_private *i915, +gmbus_xfer_read_chunk(struct intel_display *display, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus0_reg, u32 gmbus1_index) { unsigned int size = len; - bool burst_read = len > gmbus_max_xfer_size(i915); + bool burst_read = len > gmbus_max_xfer_size(display); bool extra_byte_added = false; if (burst_read) { @@ -444,21 +451,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, len++; } size = len % 256 + 256; - intel_de_write_fw(i915, GMBUS0(i915), + intel_de_write_fw(display, GMBUS0(display), gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } - intel_de_write_fw(i915, GMBUS1(i915), + intel_de_write_fw(display, GMBUS1(display), gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; - ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; - val = intel_de_read_fw(i915, GMBUS3(i915)); + val = intel_de_read_fw(display, GMBUS3(display)); do { if (extra_byte_added && len == 1) break; @@ -469,7 +476,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, if (burst_read && len == size - 4) /* Reset the override bit */ - intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg); + intel_de_write_fw(display, GMBUS0(display), gmbus0_reg); } return 0; @@ -486,9 +493,10 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915, #define INTEL_GMBUS_BURST_READ_MAX_LEN 767U static int -gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, +gmbus_xfer_read(struct intel_display *display, struct i2c_msg *msg, u32 gmbus0_reg, u32 gmbus1_index) { + struct drm_i915_private *i915 = to_i915(display->drm); u8 *buf = msg->buf; unsigned int rx_size = msg->len; unsigned int len; @@ -498,9 +506,9 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, if (HAS_GMBUS_BURST_READ(i915)) len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); else - len = min(rx_size, gmbus_max_xfer_size(i915)); + len = min(rx_size, gmbus_max_xfer_size(display)); - ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len, + ret = gmbus_xfer_read_chunk(display, msg->addr, buf, len, gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -513,7 +521,7 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, } static int -gmbus_xfer_write_chunk(struct drm_i915_private *i915, +gmbus_xfer_write_chunk(struct intel_display *display, unsigned short addr, u8 *buf, unsigned int len, 
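GMBUS3 is a 4-byte data window: every ready handshake moves up to 32 bits, with the first byte on the wire in the register's low byte. A worked example of the LSB-first packing done by the gmbus_xfer_write_chunk() loop visible below (byte values illustrative):

/*
 * Writing the six bytes { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF }:
 *
 *   1st GMBUS3 write: 0xDDCCBBAA   (bytes 0..3, LSB first)
 *   2nd GMBUS3 write: 0x0000FFEE   (bytes 4..5, upper bytes left zero)
 *
 * as produced by the packing loop in the hunk below:
 */
u32 val = 0;
unsigned int loop = 0;

do {
	val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);

The read path in gmbus_xfer_read_chunk() above unpacks in the same order, one GMBUS3 read per up-to-four bytes.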
u32 gmbus1_index) { @@ -526,8 +534,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, len -= 1; } - intel_de_write_fw(i915, GMBUS3(i915), val); - intel_de_write_fw(i915, GMBUS1(i915), + intel_de_write_fw(display, GMBUS3(display), val); + intel_de_write_fw(display, GMBUS1(display), gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -537,9 +545,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); - intel_de_write_fw(i915, GMBUS3(i915), val); + intel_de_write_fw(display, GMBUS3(display), val); - ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; } @@ -548,7 +556,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915, } static int -gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, +gmbus_xfer_write(struct intel_display *display, struct i2c_msg *msg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -557,9 +565,9 @@ gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, int ret; do { - len = min(tx_size, gmbus_max_xfer_size(i915)); + len = min(tx_size, gmbus_max_xfer_size(display)); - ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len, + ret = gmbus_xfer_write_chunk(display, msg->addr, buf, len, gmbus1_index); if (ret) return ret; @@ -586,7 +594,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, +gmbus_index_xfer(struct intel_display *display, struct i2c_msg *msgs, u32 gmbus0_reg) { u32 gmbus1_index = 0; @@ -602,17 +610,17 @@ gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, /* GMBUS5 holds 16-bit index */ if (gmbus5) - intel_de_write_fw(i915, GMBUS5(i915), gmbus5); + intel_de_write_fw(display, GMBUS5(display), gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg, + ret = gmbus_xfer_read(display, &msgs[1], gmbus0_reg, gmbus1_index); else - ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index); + ret = gmbus_xfer_write(display, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) - intel_de_write_fw(i915, GMBUS5(i915), 0); + intel_de_write_fw(display, GMBUS5(display), 0); return ret; } @@ -622,34 +630,35 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, u32 gmbus0_source) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); int i = 0, inc, try = 0; int ret = 0; /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - bxt_gmbus_clock_gating(i915, false); + bxt_gmbus_clock_gating(display, false); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) - pch_gmbus_clock_gating(i915, false); + pch_gmbus_clock_gating(display, false); retry: - intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0); + intel_de_write_fw(display, GMBUS0(display), gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(i915, &msgs[i], + ret = gmbus_index_xfer(display, &msgs[i], gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { - ret = gmbus_xfer_read(i915, &msgs[i], + ret = 
gmbus_xfer_read(display, &msgs[i], gmbus0_source | bus->reg0, 0); } else { - ret = gmbus_xfer_write(i915, &msgs[i], 0); + ret = gmbus_xfer_write(display, &msgs[i], 0); } if (!ret) - ret = gmbus_wait(i915, + ret = gmbus_wait(display, GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; @@ -661,19 +670,19 @@ retry: * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ - intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + intel_de_write_fw(display, GMBUS1(display), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ - if (gmbus_wait_idle(i915)) { - drm_dbg_kms(&i915->drm, + if (gmbus_wait_idle(display)) { + drm_dbg_kms(display->drm, "GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS0(display), 0); ret = ret ?: i; goto out; @@ -692,8 +701,8 @@ clear_err: * it's slow responding and only answers on the 2nd retry. */ ret = -ENXIO; - if (gmbus_wait_idle(i915)) { - drm_dbg_kms(&i915->drm, + if (gmbus_wait_idle(display)) { + drm_dbg_kms(display->drm, "GMBUS [%s] timed out after NAK\n", adapter->name); ret = -ETIMEDOUT; @@ -703,11 +712,11 @@ clear_err: * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the target's NAK. */ - intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); - intel_de_write_fw(i915, GMBUS1(i915), 0); - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS1(display), GMBUS_SW_CLR_INT); + intel_de_write_fw(display, GMBUS1(display), 0); + intel_de_write_fw(display, GMBUS0(display), 0); - drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", + drm_dbg_kms(display->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); @@ -718,7 +727,7 @@ clear_err: * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GMBUS [%s] NAK on first message, retry\n", adapter->name); goto retry; @@ -727,10 +736,10 @@ clear_err: goto out; timeout: - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", bus->adapter.name, bus->reg0 & 0xff); - intel_de_write_fw(i915, GMBUS0(i915), 0); + intel_de_write_fw(display, GMBUS0(display), 0); /* * Hardware may not support GMBUS over these pins? 
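One caller-visible subtlety in do_gmbus_xfer() above: gmbus_is_index_xfer() spots a short (one- or two-byte) write immediately followed by a message to the same address and coalesces the pair into a single indexed cycle, GMBUS5 carrying the 16-bit index, which is why the loop steps by inc = 2 there; a first-message NAK is also retried once, since drm_do_probe_ddc_edid() gives up on the first -ENXIO. The classic shape that triggers the indexed path, as a hypothetical EDID-style caller (address and lengths illustrative):

#include <linux/i2c.h>

/* Hypothetical caller; 'adapter' is assumed to be a GMBUS adapter. */
static int read_indexed(struct i2c_adapter *adapter)
{
	u8 offset = 0;		/* index written first */
	u8 buf[128];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,           .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
	};

	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
}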
Try GPIO bitbanging @@ -741,9 +750,9 @@ timeout: out: /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - bxt_gmbus_clock_gating(i915, true); + bxt_gmbus_clock_gating(display, true); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) - pch_gmbus_clock_gating(i915, true); + pch_gmbus_clock_gating(display, true); return ret; } @@ -752,7 +761,8 @@ static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; int ret; @@ -776,7 +786,8 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; + struct drm_i915_private *i915 = to_i915(display->drm); u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = {}; struct i2c_msg msgs[] = { @@ -797,7 +808,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) int ret; wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); /* * In order to output Aksv to the receiver, use an indexed write to @@ -806,7 +817,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) */ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; @@ -830,27 +841,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); } static int gmbus_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - return mutex_trylock(&i915->display.gmbus.mutex); + return mutex_trylock(&display->gmbus.mutex); } static void gmbus_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); } static const struct i2c_lock_operations gmbus_lock_ops = { @@ -861,31 +872,32 @@ static const struct i2c_lock_operations gmbus_lock_ops = { /** * intel_gmbus_setup - instantiate all Intel i2c GMBuses - * @i915: i915 device private + * @display: display device */ -int intel_gmbus_setup(struct drm_i915_private *i915) +int intel_gmbus_setup(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct drm_i915_private *i915 = to_i915(display->drm); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); unsigned int pin; int ret; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE; - else if (!HAS_GMCH(i915)) + display->gmbus.mmio_base = VLV_DISPLAY_BASE; + else if (!HAS_GMCH(display)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it 
doesn't have a PCH. */ - i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE; + display->gmbus.mmio_base = PCH_DISPLAY_BASE; - mutex_init(&i915->display.gmbus.mutex); - init_waitqueue_head(&i915->display.gmbus.wait_queue); + mutex_init(&display->gmbus.mutex); + init_waitqueue_head(&display->gmbus.wait_queue); - for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) { const struct gmbus_pin *gmbus_pin; struct intel_gmbus *bus; - gmbus_pin = get_gmbus_pin(i915, pin); + gmbus_pin = get_gmbus_pin(display, pin); if (!gmbus_pin) continue; @@ -901,7 +913,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915) "i915 gmbus %s", gmbus_pin->name); bus->adapter.dev.parent = &pdev->dev; - bus->i915 = i915; + bus->display = display; bus->adapter.algo = &gmbus_algorithm; bus->adapter.lock_ops = &gmbus_lock_ops; @@ -919,7 +931,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915) if (IS_I830(i915)) bus->force_bit = 1; - intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio)); + intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio)); ret = i2c_add_adapter(&bus->adapter); if (ret) { @@ -927,43 +939,43 @@ int intel_gmbus_setup(struct drm_i915_private *i915) goto err; } - i915->display.gmbus.bus[pin] = bus; + display->gmbus.bus[pin] = bus; } - intel_gmbus_reset(i915); + intel_gmbus_reset(display); return 0; err: - intel_gmbus_teardown(i915); + intel_gmbus_teardown(display); return ret; } -struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915, +struct i2c_adapter *intel_gmbus_get_adapter(struct intel_display *display, unsigned int pin) { - if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) || - !i915->display.gmbus.bus[pin])) + if (drm_WARN_ON(display->drm, pin >= ARRAY_SIZE(display->gmbus.bus) || + !display->gmbus.bus[pin])) return NULL; - return &i915->display.gmbus.bus[pin]->adapter; + return &display->gmbus.bus[pin]->adapter; } void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *i915 = bus->i915; + struct intel_display *display = bus->display; - mutex_lock(&i915->display.gmbus.mutex); + mutex_lock(&display->gmbus.mutex); bus->force_bit += force_bit ? 1 : -1; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "%sabling bit-banging on %s. force bit now %d\n", force_bit ? 
"en" : "dis", adapter->name, bus->force_bit); - mutex_unlock(&i915->display.gmbus.mutex); + mutex_unlock(&display->gmbus.mutex); } bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) @@ -973,25 +985,25 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) return bus->force_bit; } -void intel_gmbus_teardown(struct drm_i915_private *i915) +void intel_gmbus_teardown(struct intel_display *display) { unsigned int pin; - for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) { struct intel_gmbus *bus; - bus = i915->display.gmbus.bus[pin]; + bus = display->gmbus.bus[pin]; if (!bus) continue; i2c_del_adapter(&bus->adapter); kfree(bus); - i915->display.gmbus.bus[pin] = NULL; + display->gmbus.bus[pin] = NULL; } } -void intel_gmbus_irq_handler(struct drm_i915_private *i915) +void intel_gmbus_irq_handler(struct intel_display *display) { - wake_up_all(&i915->display.gmbus.wait_queue); + wake_up_all(&display->gmbus.wait_queue); } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index 8111eb23e2af..35a200a9efc0 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -8,8 +8,8 @@ #include <linux/types.h> -struct drm_i915_private; struct i2c_adapter; +struct intel_display; #define GMBUS_PIN_DISABLED 0 #define GMBUS_PIN_SSC 1 @@ -34,18 +34,17 @@ struct i2c_adapter; #define GMBUS_NUM_PINS 15 /* including 0 */ -int intel_gmbus_setup(struct drm_i915_private *dev_priv); -void intel_gmbus_teardown(struct drm_i915_private *dev_priv); -bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, - unsigned int pin); +int intel_gmbus_setup(struct intel_display *display); +void intel_gmbus_teardown(struct intel_display *display); +bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin); int intel_gmbus_output_aksv(struct i2c_adapter *adapter); struct i2c_adapter * -intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); +intel_gmbus_get_adapter(struct intel_display *display, unsigned int pin); void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter); -void intel_gmbus_reset(struct drm_i915_private *dev_priv); +void intel_gmbus_reset(struct intel_display *display); -void intel_gmbus_irq_handler(struct drm_i915_private *i915); +void intel_gmbus_irq_handler(struct intel_display *display); #endif /* __INTEL_GMBUS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h index 53aacbda983c..59bad1dda6d6 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h @@ -8,9 +8,9 @@ #include "i915_reg_defs.h" -#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base) +#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base) -#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio)) +#define GPIO(__display, gpio) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5010 + 4 * (gpio)) #define GPIO_CLOCK_DIR_MASK (1 << 0) #define GPIO_CLOCK_DIR_IN (0 << 1) #define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -27,7 +27,7 @@ #define GPIO_DATA_PULLUP_DISABLE (1 << 13) /* clock/port select */ -#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100) +#define GMBUS0(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5100) #define GMBUS_AKSV_SELECT (1 << 11) #define GMBUS_RATE_100KHZ (0 << 
8) #define GMBUS_RATE_50KHZ (1 << 8) @@ -37,7 +37,7 @@ #define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) /* command/status */ -#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104) +#define GMBUS1(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5104) #define GMBUS_SW_CLR_INT (1 << 31) #define GMBUS_SW_RDY (1 << 30) #define GMBUS_ENT (1 << 29) /* enable timeout */ @@ -54,7 +54,7 @@ #define GMBUS_SLAVE_WRITE (0 << 0) /* status */ -#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108) +#define GMBUS2(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5108) #define GMBUS_INUSE (1 << 15) #define GMBUS_HW_WAIT_PHASE (1 << 14) #define GMBUS_STALL_TIMEOUT (1 << 13) @@ -64,10 +64,10 @@ #define GMBUS_ACTIVE (1 << 9) /* data buffer bytes 3-0 */ -#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c) +#define GMBUS3(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x510c) /* interrupt mask (Pineview+) */ -#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110) +#define GMBUS4(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5110) #define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) #define GMBUS_NAK_EN (1 << 3) #define GMBUS_IDLE_EN (1 << 2) @@ -75,7 +75,7 @@ #define GMBUS_HW_RDY_EN (1 << 0) /* byte index */ -#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120) +#define GMBUS5(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5120) #define GMBUS_2BYTE_INDEX_EN (1 << 31) #endif /* __INTEL_GMBUS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 377939de0ff4..f6d42ec6949e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -25,6 +25,7 @@ #include "intel_hdcp.h" #include "intel_hdcp_gsc.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" #include "intel_pcode.h" #define KEY_LOAD_TRIES 5 @@ -35,20 +36,20 @@ static void intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder, struct intel_hdcp *hdcp) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); /* Here we assume HDMI is in TMDS mode of operation */ if (encoder->type != INTEL_OUTPUT_HDMI) return; - if (DISPLAY_VER(dev_priv) >= 14) { - if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER)) - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), + if (DISPLAY_VER(display) >= 14) { + if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) + intel_de_rmw(display, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder), 0, HDCP_LINE_REKEY_DISABLE); - else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) || - IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER)) - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder), + else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) || + IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder), 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE); } } @@ -95,10 +96,10 @@ static int intel_hdcp_required_content_stream(struct intel_atomic_state *state, struct intel_digital_port *dig_port) { + struct intel_display *display = to_intel_display(state); struct drm_connector_list_iter conn_iter; struct intel_digital_port *conn_dig_port; struct intel_connector *connector; - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; bool enforce_type0 = false; int 
k; @@ -111,7 +112,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state, if (!dig_port->hdcp_mst_type1_capable) enforce_type0 = true; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.status == connector_status_disconnected) continue; @@ -133,7 +134,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state, } drm_connector_list_iter_end(&conn_iter); - if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) + if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0)) return -EINVAL; /* @@ -181,7 +182,7 @@ static int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); int ret, i, tries = 2; /* HDCP spec states that we must retry the bksv if it is invalid */ @@ -193,7 +194,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, break; } if (i == tries) { - drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); + drm_dbg_kms(display->drm, "Bksv is invalid\n"); return -ENODEV; } @@ -232,7 +233,7 @@ bool intel_hdcp_get_capability(struct intel_connector *connector) */ static bool intel_hdcp2_prerequisite(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; /* I915 support for HDCP2.2 */ @@ -240,18 +241,18 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ - if (intel_hdcp_gsc_cs_required(i915)) { - if (!intel_hdcp_gsc_check_status(i915)) + if (intel_hdcp_gsc_cs_required(display)) { + if (!intel_hdcp_gsc_check_status(display)) return false; } /* MEI/GSC interface is solid depending on which is used */ - mutex_lock(&i915->display.hdcp.hdcp_mutex); - if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + if (!display->hdcp.comp_added || !display->hdcp.arbiter) { + mutex_unlock(&display->hdcp.hdcp_mutex); return false; } - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return true; } @@ -287,19 +288,19 @@ void intel_hdcp_get_remote_capability(struct intel_connector *connector, *hdcp2_capable = false; } -static bool intel_hdcp_in_use(struct drm_i915_private *i915, +static bool intel_hdcp_in_use(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(i915, - HDCP_STATUS(i915, cpu_transcoder, port)) & + return intel_de_read(display, + HDCP_STATUS(display, cpu_transcoder, port)) & HDCP_STATUS_ENC; } -static bool intel_hdcp2_in_use(struct drm_i915_private *i915, +static bool intel_hdcp2_in_use(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - return intel_de_read(i915, - HDCP2_STATUS(i915, cpu_transcoder, port)) & + return intel_de_read(display, + HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS; } @@ -324,8 +325,9 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, return 0; } -static bool hdcp_key_loadable(struct drm_i915_private *i915) +static bool hdcp_key_loadable(struct 
intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); enum i915_power_well_id id; intel_wakeref_t wakeref; bool enabled = false; @@ -352,19 +354,20 @@ static bool hdcp_key_loadable(struct drm_i915_private *i915) return enabled; } -static void intel_hdcp_clear_keys(struct drm_i915_private *i915) +static void intel_hdcp_clear_keys(struct intel_display *display) { - intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); - intel_de_write(i915, HDCP_KEY_STATUS, + intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + intel_de_write(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); } -static int intel_hdcp_load_keys(struct drm_i915_private *i915) +static int intel_hdcp_load_keys(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 val; - val = intel_de_read(i915, HDCP_KEY_STATUS); + val = intel_de_read(display, HDCP_KEY_STATUS); if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) return 0; @@ -373,7 +376,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) * out of reset. So if Key is not already loaded, its an error state. */ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) return -ENXIO; /* @@ -384,20 +387,20 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) * process from other platforms. These platforms use the GT Driver * Mailbox interface. */ - if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) { + if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) { ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to initiate HDCP key load (%d)\n", ret); return ret; } } else { - intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); } /* Wait for the keys to load (500us) */ - ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS, + ret = intel_de_wait_custom(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) @@ -406,27 +409,27 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) return -ENXIO; /* Send Aksv over to PCH display for use in authentication */ - intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); return 0; } /* Returns updated SHA-1 index */ -static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text) +static int intel_write_sha_text(struct intel_display *display, u32 sha_text) { - intel_de_write(i915, HDCP_SHA_TEXT, sha_text); - if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { - drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n"); + intel_de_write(display, HDCP_SHA_TEXT, sha_text); + if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { + drm_err(display->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } return 0; } static -u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, +u32 intel_hdcp_get_repeater_ctl(struct intel_display *display, enum transcoder cpu_transcoder, enum port port) { - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { switch (cpu_transcoder) { case TRANSCODER_A: return HDCP_TRANSA_REP_PRESENT | @@ -441,7 +444,7 @@ u32 intel_hdcp_get_repeater_ctl(struct 
drm_i915_private *i915, return HDCP_TRANSD_REP_PRESENT | HDCP_TRANSD_SHA1_M0; default: - drm_err(&i915->drm, "Unknown transcoder %d\n", + drm_err(display->drm, "Unknown transcoder %d\n", cpu_transcoder); return 0; } @@ -459,7 +462,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - drm_err(&i915->drm, "Unknown port %d\n", port); + drm_err(display->drm, "Unknown port %d\n", port); return 0; } } @@ -469,8 +472,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; @@ -481,7 +484,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, ret = shim->read_v_prime_part(dig_port, i, &vprime); if (ret) return ret; - intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); + intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime); } /* @@ -497,8 +500,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx = 0; sha_text = 0; sha_leftovers = 0; - rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); - intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; @@ -510,14 +513,14 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_text |= ksv[j] << off; } - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; /* Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); if (!(sha_idx % 64)) - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Store the leftover bytes from the ksv in sha_text */ @@ -534,7 +537,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, if (sizeof(sha_text) > sha_leftovers) continue; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_leftovers = 0; @@ -550,73 +553,73 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, */ if (sha_leftovers == 0) { /* Write 16 bits of text, 16 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(i915, + ret = intel_write_sha_text(display, bstatus[0] << 8 | bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if 
(sha_leftovers == 1) { /* Write 24 bits of text, 8 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); sha_text |= bstatus[0] << 16 | bstatus[1] << 8; /* Only 24-bits of data, must be in the LSB */ sha_text = (sha_text & 0xffffff00) >> 8; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 2) { /* Write 32 bits of text */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 8 | bstatus[1]; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); for (i = 0; i < 2; i++) { - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -626,56 +629,56 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * Terminate the SHA-1 stream by hand. For the other leftover * cases this is appended by the hardware. 
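* (Editorial aside, not part of the patch: DRM_HDCP_SHA1_TERMINATOR is
* 0x80, the single '1' padding bit that SHA-1 requires after the message;
* sha_text = 0x80 << 24 = 0x80000000, which places the terminator in the
* most significant byte of the 32-bit word written to HDCP_SHA_TEXT.)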
*/ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { /* Write 32 bits of text (filled from LSB) */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0]; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); - ret = intel_write_sha_text(i915, bstatus[1]); + ret = intel_write_sha_text(display, bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else { - drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", + drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n", sha_leftovers); return -EINVAL; } - intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ while ((sha_idx % 64) < (64 - sizeof(sha_text))) { - ret = intel_write_sha_text(i915, 0); + ret = intel_write_sha_text(display, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); @@ -687,20 +690,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, * - 10 bytes for BINFO/BSTATUS(2), M0(8) */ sha_text = (num_downstream * 5 + 10) * 8; - ret = intel_write_sha_text(i915, sha_text); + ret = intel_write_sha_text(display, sha_text); if (ret < 0) return ret; /* Tell the HW we're done with the hash and wait for it to ACK */ - intel_de_write(i915, HDCP_REP_CTL, + intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_de_wait_for_set(i915, HDCP_REP_CTL, + if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, 1)) { - drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); + drm_err(display->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } - if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { - drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); + if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n"); return -ENXIO; } @@ -711,15 +714,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, static int intel_hdcp_auth_downstream(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; u8 bstatus[2], 
num_downstream, *ksv_fifo; int ret, i, tries = 3; ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "KSV list failed to become ready (%d)\n", ret); return ret; } @@ -730,7 +733,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); + drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n"); return -EPERM; } @@ -743,14 +746,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); if (num_downstream == 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Repeater with zero downstream devices\n"); return -EINVAL; } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); if (!ksv_fifo) { - drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); + drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n"); return -ENOMEM; } @@ -758,9 +761,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (ret) goto err; - if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, + if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo, num_downstream) > 0) { - drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); + drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; } @@ -778,12 +781,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) } if (i == tries) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "V Prime validation failed.(%d)\n", ret); goto err; } - drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", + drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n", num_downstream); ret = 0; err: @@ -794,8 +797,8 @@ err: /* Implements Part 1 of the HDCP authorization procedure */ static int intel_hdcp_auth(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; @@ -827,7 +830,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret) return ret; if (!hdcp_capable) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Panel is not HDCP capable\n"); return -EINVAL; } @@ -835,24 +838,24 @@ static int intel_hdcp_auth(struct intel_connector *connector) /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - intel_de_write(i915, - HDCP_ANINIT(i915, cpu_transcoder, port), + intel_de_write(display, + HDCP_ANINIT(display, cpu_transcoder, port), get_random_u32()); - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait_for_set(display, + HDCP_STATUS(display, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { - drm_err(&i915->drm, "Timed out waiting for An\n"); + drm_err(display->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = intel_de_read(i915, - HDCP_ANLO(i915, cpu_transcoder, port)); - an.reg[1] = intel_de_read(i915, - HDCP_ANHI(i915, cpu_transcoder, port)); + an.reg[0] = 
intel_de_read(display, + HDCP_ANLO(display, cpu_transcoder, port)); + an.reg[1] = intel_de_read(display, + HDCP_ANHI(display, cpu_transcoder, port)); ret = shim->write_an_aksv(dig_port, an.shim); if (ret) return ret; @@ -865,34 +868,34 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret < 0) return ret; - if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { - drm_err(&i915->drm, "BKSV is revoked\n"); + if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) { + drm_err(display->drm, "BKSV is revoked\n"); return -EPERM; } - intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port), bksv.reg[0]); - intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(dig_port, &repeater_present); if (ret) return ret; if (repeater_present) - intel_de_write(i915, HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port)); + intel_de_write(display, HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port)); ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { - drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); + drm_err(display->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } @@ -918,30 +921,30 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = shim->read_ri_prime(dig_port, ri.shim); if (ret) return ret; - intel_de_write(i915, - HDCP_RPRIME(i915, cpu_transcoder, port), + intel_de_write(display, + HDCP_RPRIME(display, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Timed out waiting for Ri prime match (%x)\n", - intel_de_read(i915, - HDCP_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, + HDCP_STATUS(display, cpu_transcoder, port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + if (intel_de_wait_for_set(display, + HDCP_STATUS(display, cpu_transcoder, port), HDCP_STATUS_ENC, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, "Timed out waiting for encryption\n"); + drm_err(display->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -949,42 +952,42 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (shim->stream_encryption) { ret = shim->stream_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", + drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream 
encrypted\n", transcoder_name(hdcp->stream_transcoder)); } if (repeater_present) return intel_hdcp_auth_downstream(connector); - drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); + drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n"); return 0; } static int _intel_hdcp_disable(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; u32 repeater_ctl; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n", connector->base.base.id, connector->base.name); if (hdcp->shim->stream_encryption) { ret = hdcp->shim->stream_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", + drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); /* * If there are other connectors on this port using HDCP, @@ -996,51 +999,51 @@ static int _intel_hdcp_disable(struct intel_connector *connector) } hdcp->hdcp_encrypted = false; - intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); - if (intel_de_wait_for_clear(i915, - HDCP_STATUS(i915, cpu_transcoder, port), + intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0); + if (intel_de_wait_for_clear(display, + HDCP_STATUS(display, cpu_transcoder, port), ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } - repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, + repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port); - intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); + intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); + drm_err(display->drm, "Failed to disable HDCP signalling\n"); return ret; } - drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); + drm_dbg_kms(display->drm, "HDCP is disabled\n"); return 0; } static int intel_hdcp1_enable(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int i, ret, tries = 3; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n", connector->base.base.id, connector->base.name); - if (!hdcp_key_loadable(i915)) { - drm_err(&i915->drm, "HDCP key Load is not possible\n"); + if (!hdcp_key_loadable(display)) { + drm_err(display->drm, "HDCP key Load is not possible\n"); return -ENXIO; } for (i = 0; i < KEY_LOAD_TRIES; i++) { - ret = intel_hdcp_load_keys(i915); + ret = intel_hdcp_load_keys(display); if (!ret) break; - intel_hdcp_clear_keys(i915); + intel_hdcp_clear_keys(display); } 
if (ret) { - drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n", + drm_err(display->drm, "Could not load HDCP keys, (%d)\n", ret); return ret; } @@ -1053,13 +1056,13 @@ static int intel_hdcp1_enable(struct intel_connector *connector) return 0; } - drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret); + drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret); /* Ensuring HDCP encryption and signalling are stopped. */ _intel_hdcp_disable(connector); } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } @@ -1072,20 +1075,20 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) static void intel_hdcp_update_value(struct intel_connector *connector, u64 value, bool update_property) { - struct drm_device *dev = connector->base.dev; + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *i915 = to_i915(connector->base.dev); - drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); + drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex)); if (hdcp->value == value) return; - drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); + drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex)); if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { - if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) + if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0)) dig_port->num_hdcp_streams--; } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { dig_port->num_hdcp_streams++; @@ -1102,8 +1105,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector, /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -1121,12 +1124,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&i915->drm, - !intel_hdcp_in_use(i915, cpu_transcoder, port))) { - drm_err(&i915->drm, + if (drm_WARN_ON(display->drm, + !intel_hdcp_in_use(display, cpu_transcoder, port))) { + drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n", connector->base.base.id, connector->base.name, - intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port))); ret = -ENXIO; intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, @@ -1142,13 +1145,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n", connector->base.base.id, connector->base.name); ret = _intel_hdcp_disable(connector); if (ret) { - drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret); + drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); @@ -1169,9 +1172,9 @@ static void intel_hdcp_prop_work(struct work_struct *work) struct intel_hdcp 
*hdcp = container_of(work, struct intel_hdcp, prop_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); - drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); + drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); mutex_lock(&hdcp->mutex); /* @@ -1184,40 +1187,40 @@ static void intel_hdcp_prop_work(struct work_struct *work) hdcp->value); mutex_unlock(&hdcp->mutex); - drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + drm_modeset_unlock(&display->drm->mode_config.connection_mutex); drm_connector_put(&connector->base); } -bool is_hdcp_supported(struct drm_i915_private *i915, enum port port) +bool is_hdcp_supported(struct intel_display *display, enum port port) { - return DISPLAY_RUNTIME_INFO(i915)->has_hdcp && - (DISPLAY_VER(i915) >= 12 || port < PORT_E); + return DISPLAY_RUNTIME_INFO(display)->has_hdcp && + (DISPLAY_VER(display) >= 12 || port < PORT_E); } static int hdcp2_prepare_ake_init(struct intel_connector *connector, struct hdcp2_ake_init *ake_data) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); if (ret) - drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n", + drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1229,17 +1232,17 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct hdcp2_ake_no_stored_km *ek_pub_km, size_t *msg_sz) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } @@ -1247,9 +1250,9 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n", + drm_dbg_kms(display->drm, "Verify rx_cert failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1257,24 +1260,24 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, static int hdcp2_verify_hprime(struct intel_connector *connector, struct hdcp2_ake_send_hprime *rx_hprime) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1283,25 +1286,25 @@ static int hdcp2_store_pairing_info(struct intel_connector *connector, struct hdcp2_ake_send_pairing_info *pairing_info) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); if (ret < 0) - drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n", + drm_dbg_kms(display->drm, "Store pairing info failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1310,25 +1313,25 @@ static int hdcp2_prepare_lc_init(struct intel_connector *connector, struct hdcp2_lc_init *lc_init) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); if (ret < 0) - drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n", + drm_dbg_kms(display->drm, "Prepare lc_init failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1337,25 +1340,25 @@ static int hdcp2_verify_lprime(struct intel_connector *connector, struct hdcp2_lc_send_lprime *rx_lprime) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n", + drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1363,25 +1366,25 @@ hdcp2_verify_lprime(struct intel_connector *connector, static int hdcp2_prepare_skey(struct intel_connector *connector, struct hdcp2_ske_send_eks *ske_data) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); if (ret < 0) - drm_dbg_kms(&i915->drm, "Get session key failed. %d\n", + drm_dbg_kms(display->drm, "Get session key failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1392,17 +1395,17 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, *rep_topology, struct hdcp2_rep_send_ack *rep_send_ack) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } @@ -1411,9 +1414,9 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, rep_topology, rep_send_ack); if (ret < 0) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Verify rep topology failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1422,71 +1425,71 @@ static int hdcp2_verify_mprime(struct intel_connector *connector, struct hdcp2_rep_stream_ready *stream_ready) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); if (ret < 0) - drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } static int hdcp2_authenticate_port(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); if (ret < 0) - drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n", + drm_dbg_kms(display->drm, "Enable hdcp auth failed. 
%d\n", ret); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } static int hdcp2_close_session(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - arbiter = i915->display.hdcp.arbiter; + mutex_lock(&display->hdcp.hdcp_mutex); + arbiter = display->hdcp.arbiter; if (!arbiter || !arbiter->ops) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, &dig_port->hdcp_port_data); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } @@ -1499,7 +1502,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector) /* Authentication flow starts from here */ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_ake_init ake_init; @@ -1510,7 +1513,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; size_t size; - int ret; + int ret, i; /* Init for seq_num */ hdcp->seq_num_v = 0; @@ -1520,27 +1523,50 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - ret = shim->write_2_2_msg(connector, &msgs.ake_init, - sizeof(msgs.ake_init)); - if (ret < 0) - return ret; + /* + * Retry the first read and write to downstream at least 10 times + * with a 50ms delay if not hdcp2 capable(dock decides to stop advertising + * hdcp2 capability for some reason). The reason being that + * during suspend resume dock usually keeps the HDCP2 registers inaccesible + * causing AUX error. This wouldn't be a big problem if the userspace + * just kept retrying with some delay while it continues to play low + * value content but most userpace applications end up throwing an error + * when it receives one from KMD. This makes sure we give the dock + * and the sink devices to complete its power cycle and then try HDCP + * authentication. The values of 10 and delay of 50ms was decided based + * on multiple trial and errors. 
+ */ + for (i = 0; i < 10; i++) { + if (!intel_hdcp2_get_capability(connector)) { + msleep(50); + continue; + } + + ret = shim->write_2_2_msg(connector, &msgs.ake_init, + sizeof(msgs.ake_init)); + if (ret < 0) + continue; + + ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, + &msgs.send_cert, sizeof(msgs.send_cert)); + if (ret > 0) + break; + } - ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT, - &msgs.send_cert, sizeof(msgs.send_cert)); if (ret < 0) return ret; if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { - drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n"); + drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); - if (drm_hdcp_check_ksvs_revoked(&i915->drm, + if (drm_hdcp_check_ksvs_revoked(display->drm, msgs.send_cert.cert_rx.receiver_id, 1) > 0) { - drm_err(&i915->drm, "Receiver ID is revoked\n"); + drm_err(display->drm, "Receiver ID is revoked\n"); return -EPERM; }
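The hunk above implements the rationale in the comment: probe the sink's HDCP2 capability, sleep, and redo the AKE write/read pair until the read succeeds or the ten attempts run out. A minimal sketch of that bounded-retry shape, assuming hypothetical names (retry_xfer() and its callbacks are illustrative, not part of the driver):

#include <linux/delay.h>

/*
 * Sketch of the bounded-retry shape used above; retry_xfer() and its
 * callbacks are hypothetical, only the structure mirrors the driver.
 */
static int retry_xfer(bool (*capable)(void *ctx),
		      int (*xfer)(void *ctx),
		      void *ctx, int tries, unsigned int delay_ms)
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < tries; i++) {
		/* The sink may still be powering back up; give it time. */
		if (!capable(ctx)) {
			msleep(delay_ms);
			continue;
		}

		ret = xfer(ctx);
		if (ret >= 0)
			break;
	}

	return ret;
}

@@ -1691,8 +1717,8 @@ out: static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_send_receiverid_list recvid_list; @@ -1712,7 +1738,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { - drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n"); + drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n"); return -EINVAL; } @@ -1725,7 +1751,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP1.x or 2.0 Legacy Device Downstream\n"); return -EINVAL; } @@ -1735,23 +1761,23 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); if (!hdcp->hdcp2_encrypted && seq_num_v) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Non zero Seq_num_v at first RecvId_List msg\n"); return -EINVAL; } if (seq_num_v < hdcp->seq_num_v) { /* Roll over of the seq_num_v from repeater. Reauthenticate. */ - drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n"); + drm_dbg_kms(display->drm, "Seq_num_v roll over.\n"); return -EINVAL; } device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - if (drm_hdcp_check_ksvs_revoked(&i915->drm, + if (drm_hdcp_check_ksvs_revoked(display->drm, msgs.recvid_list.receiver_ids, device_cnt) > 0) { - drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n"); + drm_err(display->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } @@ -1772,27 +1798,27 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) static int hdcp2_authenticate_sink(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; int ret; ret = hdcp2_authentication_key_exchange(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, "AKE Failed.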
Err : %d\n", ret); + drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret); return ret; } ret = hdcp2_locality_check(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Locality Check failed. Err : %d\n", ret); return ret; } ret = hdcp2_session_key_exchange(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); + drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret); return ret; } @@ -1807,7 +1833,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) if (hdcp->is_repeater) { ret = hdcp2_authenticate_repeater_topology(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Repeater Auth Failed. Err: %d\n", ret); return ret; } @@ -1818,17 +1844,17 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) static int hdcp2_enable_stream_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; enum port port = dig_port->base.port; int ret = 0; - if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n", connector->base.base.id, connector->base.name); ret = -EPERM; goto link_recover; @@ -1837,11 +1863,11 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, true); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", + drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } @@ -1849,7 +1875,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector) link_recover: if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); dig_port->hdcp_auth_status = false; data->k = 0; @@ -1859,35 +1885,35 @@ link_recover: static int hdcp2_enable_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&i915->drm, - intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + drm_WARN_ON(display->drm, + intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to enable HDCP signalling. 
%d\n", ret); return ret; } } - if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & + if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & LINK_AUTH_STATUS) /* Link is Authenticated. Now set for Encryption */ - intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), + intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), 0, CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_set(i915, - HDCP2_STATUS(i915, cpu_transcoder, + ret = intel_de_wait_for_set(display, + HDCP2_STATUS(display, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); @@ -1898,32 +1924,33 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) static int hdcp2_disable_encryption(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & - LINK_ENCRYPTION_STATUS)); + drm_WARN_ON(display->drm, + !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS)); - intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), + intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port), CTL_LINK_ENCRYPTION_REQ, 0); - ret = intel_de_wait_for_clear(i915, - HDCP2_STATUS(i915, cpu_transcoder, + ret = intel_de_wait_for_clear(display, + HDCP2_STATUS(display, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) - drm_dbg_kms(&i915->drm, "Disable Encryption Timedout"); + drm_dbg_kms(display->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "Failed to disable HDCP signalling. 
%d\n", ret); return ret; @@ -1936,7 +1963,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); int i, tries = 3, ret; if (!connector->hdcp.is_repeater) @@ -1949,12 +1976,12 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector) /* Lets restart the auth incase of seq_num_m roll over */ if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "seq_num_m roll over.(%d)\n", ret); break; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2 stream management %d of %d Failed.(%d)\n", i + 1, tries, ret); } @@ -1965,8 +1992,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector) static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret = 0, i, tries = 3; for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { @@ -1974,7 +2001,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, if (!ret) { ret = intel_hdcp_prepare_streams(state, connector); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Prepare stream failed.(%d)\n", ret); break; @@ -1982,7 +2009,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, ret = hdcp2_propagate_stream_management_info(connector); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Stream management failed.(%d)\n", ret); break; @@ -1991,15 +2018,15 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, ret = hdcp2_authenticate_port(connector); if (!ret) break; - drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n", + drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n", ret); } /* Clearing the mei hdcp session */ - drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", + drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", i + 1, tries, ret); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); } if (!ret && !dig_port->hdcp_auth_status) { @@ -2010,10 +2037,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); ret = hdcp2_enable_encryption(connector); if (ret < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Encryption Enable Failed.(%d)\n", ret); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); } } @@ -2026,11 +2053,11 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, static int _intel_hdcp2_enable(struct intel_atomic_state *state, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. 
Type: %d\n", connector->base.base.id, connector->base.name, hdcp->content_type); @@ -2038,12 +2065,12 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state, ret = hdcp2_authenticate_and_encrypt(state, connector); if (ret) { - drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", + drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", hdcp->content_type, ret); return ret; } - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n", connector->base.base.id, connector->base.name, hdcp->content_type); @@ -2054,23 +2081,23 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state, static int _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; int ret; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n", connector->base.base.id, connector->base.name); if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, false); if (ret) { - drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n", connector->base.base.id, connector->base.name); return ret; } - drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", + drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery) @@ -2080,7 +2107,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery ret = hdcp2_disable_encryption(connector); if (hdcp2_deauthenticate_port(connector) < 0) - drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); + drm_dbg_kms(display->drm, "Port deauth failed.\n"); connector->hdcp.hdcp2_encrypted = false; dig_port->hdcp_auth_status = false; @@ -2092,8 +2119,8 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery /* Implements the Link Integrity Check for HDCP2.2 */ static int intel_hdcp2_check_link(struct intel_connector *connector) { + struct intel_display *display = to_intel_display(connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; @@ -2110,11 +2137,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (drm_WARN_ON(&i915->drm, - !intel_hdcp2_in_use(i915, cpu_transcoder, port))) { - drm_err(&i915->drm, + if (drm_WARN_ON(display->drm, + !intel_hdcp2_in_use(display, cpu_transcoder, port))) { + drm_err(display->drm, "HDCP2.2 link stopped the encryption, %x\n", - intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port))); + intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port))); ret = -ENXIO; _intel_hdcp2_disable(connector, true); intel_hdcp_update_value(connector, @@ -2137,17 +2164,17 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) if 
(hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "HDCP2.2 Downstream topology change\n"); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", connector->base.base.id, connector->base.name); } ret = _intel_hdcp2_disable(connector, true); if (ret) { - drm_err(&i915->drm, + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n", connector->base.base.id, connector->base.name, ret); intel_hdcp_update_value(connector, @@ -2169,7 +2196,8 @@ static void intel_hdcp_check_work(struct work_struct *work) struct intel_hdcp, check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); if (drm_connector_is_unregistered(&connector->base)) return; @@ -2186,13 +2214,12 @@ static int i915_hdcp_component_bind(struct device *drv_kdev, struct device *mei_kdev, void *data) { struct intel_display *display = to_intel_display(drv_kdev); - struct drm_i915_private *i915 = to_i915(display->drm); - drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data; - i915->display.hdcp.arbiter->hdcp_dev = mei_kdev; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg(display->drm, "I915 HDCP comp bind\n"); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data; + display->hdcp.arbiter->hdcp_dev = mei_kdev; + mutex_unlock(&display->hdcp.hdcp_mutex); return 0; } @@ -2201,12 +2228,11 @@ static void i915_hdcp_component_unbind(struct device *drv_kdev, struct device *mei_kdev, void *data) { struct intel_display *display = to_intel_display(drv_kdev); - struct drm_i915_private *i915 = to_i915(display->drm); - drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = NULL; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + drm_dbg(display->drm, "I915 HDCP comp unbind\n"); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = NULL; + mutex_unlock(&display->hdcp.hdcp_mutex); } static const struct component_ops i915_hdcp_ops = { @@ -2240,11 +2266,11 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; enum port port = dig_port->base.port; - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) data->hdcp_ddi = intel_get_hdcp_ddi_index(port); else /* @@ -2264,55 +2290,57 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, data->protocol = (u8)shim->protocol; if (!data->streams) - data->streams = kcalloc(INTEL_NUM_PIPES(i915), + data->streams = kcalloc(INTEL_NUM_PIPES(display), sizeof(struct hdcp2_streamid_type), GFP_KERNEL); if (!data->streams) { - drm_err(&i915->drm, "Out of Memory\n"); + drm_err(display->drm, "Out of Memory\n"); return -ENOMEM; } return 0; } -static bool is_hdcp2_supported(struct drm_i915_private *i915) +static bool is_hdcp2_supported(struct intel_display *display) { - if (intel_hdcp_gsc_cs_required(i915)) + struct drm_i915_private *i915 = 
to_i915(display->drm); + + if (intel_hdcp_gsc_cs_required(display)) return true; if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; - return (DISPLAY_VER(i915) >= 10 || + return (DISPLAY_VER(display) >= 10 || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)); } -void intel_hdcp_component_init(struct drm_i915_private *i915) +void intel_hdcp_component_init(struct intel_display *display) { int ret; - if (!is_hdcp2_supported(i915)) + if (!is_hdcp2_supported(display)) return; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); + mutex_lock(&display->hdcp.hdcp_mutex); + drm_WARN_ON(display->drm, display->hdcp.comp_added); - i915->display.hdcp.comp_added = true; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); - if (intel_hdcp_gsc_cs_required(i915)) - ret = intel_hdcp_gsc_init(i915); + display->hdcp.comp_added = true; + mutex_unlock(&display->hdcp.hdcp_mutex); + if (intel_hdcp_gsc_cs_required(display)) + ret = intel_hdcp_gsc_init(display); else - ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, + ret = component_add_typed(display->drm->dev, &i915_hdcp_ops, I915_COMPONENT_HDCP); if (ret < 0) { - drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", + drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n", ret); - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.comp_added = false; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.comp_added = false; + mutex_unlock(&display->hdcp.hdcp_mutex); return; } } @@ -2321,13 +2349,13 @@ static void intel_hdcp2_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = initialize_hdcp_port_data(connector, dig_port, shim); if (ret) { - drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); + drm_dbg_kms(display->drm, "Mei hdcp data init failed\n"); return; } @@ -2338,19 +2366,18 @@ int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret; if (!shim) return -EINVAL; - if (is_hdcp2_supported(i915)) + if (is_hdcp2_supported(display)) intel_hdcp2_init(connector, dig_port, shim); - ret = - drm_connector_attach_content_protection_property(&connector->base, - hdcp->hdcp2_supported); + ret = drm_connector_attach_content_protection_property(&connector->base, + hdcp->hdcp2_supported); if (ret) { hdcp->hdcp2_supported = false; kfree(dig_port->hdcp_port_data.streams); @@ -2371,7 +2398,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -2383,14 +2411,14 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, return -ENOENT; if (!connector->encoder) { - drm_err(&i915->drm, 
"[CONNECTOR:%d:%s] encoder is not initialized\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", connector->base.base.id, connector->base.name); return -ENODEV; } mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = (u8)conn_state->hdcp_content_type; @@ -2402,7 +2430,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, hdcp->stream_transcoder = INVALID_TRANSCODER; } - if (DISPLAY_VER(i915) >= 12) + if (DISPLAY_VER(display) >= 12) dig_port->hdcp_port_data.hdcp_transcoder = intel_get_hdcp_transcoder(hdcp->cpu_transcoder); @@ -2553,21 +2581,21 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, _intel_hdcp_enable(state, encoder, crtc_state, conn_state); } -void intel_hdcp_component_fini(struct drm_i915_private *i915) +void intel_hdcp_component_fini(struct intel_display *display) { - mutex_lock(&i915->display.hdcp.hdcp_mutex); - if (!i915->display.hdcp.comp_added) { - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + if (!display->hdcp.comp_added) { + mutex_unlock(&display->hdcp.hdcp_mutex); return; } - i915->display.hdcp.comp_added = false; - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + display->hdcp.comp_added = false; + mutex_unlock(&display->hdcp.hdcp_mutex); - if (intel_hdcp_gsc_cs_required(i915)) - intel_hdcp_gsc_fini(i915); + if (intel_hdcp_gsc_cs_required(display)) + intel_hdcp_gsc_fini(display); else - component_del(i915->drm.dev, &i915_hdcp_ops); + component_del(display->drm->dev, &i915_hdcp_ops); } void intel_hdcp_cleanup(struct intel_connector *connector) @@ -2657,7 +2685,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); + struct drm_i915_private *i915 = to_i915(display->drm); if (!hdcp->shim) return; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 477f2d2bb120..d99830cfb798 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -12,13 +12,13 @@ struct drm_connector; struct drm_connector_state; -struct drm_i915_private; struct intel_atomic_state; struct intel_connector; struct intel_crtc_state; +struct intel_digital_port; +struct intel_display; struct intel_encoder; struct intel_hdcp_shim; -struct intel_digital_port; enum port; enum transcoder; @@ -37,14 +37,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool is_hdcp_supported(struct drm_i915_private *i915, enum port port); +bool is_hdcp_supported(struct intel_display *display, enum port port); bool intel_hdcp_get_capability(struct intel_connector *connector); bool intel_hdcp2_get_capability(struct intel_connector *connector); void intel_hdcp_get_remote_capability(struct intel_connector *connector, bool *hdcp_capable, bool *hdcp2_capable); -void intel_hdcp_component_init(struct drm_i915_private *i915); -void intel_hdcp_component_fini(struct drm_i915_private *i915); +void intel_hdcp_component_init(struct intel_display *display); +void intel_hdcp_component_fini(struct intel_display *display); void 
intel_hdcp_cleanup(struct intel_connector *connector); void intel_hdcp_handle_cp_irq(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 16afeb8a3a8d..55965844d829 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -19,18 +19,19 @@ struct intel_hdcp_gsc_message { void *hdcp_cmd_out; }; -bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) +bool intel_hdcp_gsc_cs_required(struct intel_display *display) { - return DISPLAY_VER(i915) >= 14; + return DISPLAY_VER(display) >= 14; } -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915) +bool intel_hdcp_gsc_check_status(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_gt *gt = i915->media_gt; struct intel_gsc_uc *gsc = gt ? >->uc.gsc : NULL; if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "GSC components required for HDCP2.2 are not ready\n"); return false; } @@ -106,8 +107,9 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = { .close_hdcp_session = intel_hdcp_gsc_close_session, }; -static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) +static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_hdcp_gsc_message *hdcp_message; int ret; @@ -120,19 +122,19 @@ static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) * NOTE: No need to lock the comp mutex here as it is already * going to be taken before this function called */ - i915->display.hdcp.hdcp_message = hdcp_message; + display->hdcp.hdcp_message = hdcp_message; ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message); if (ret) - drm_err(&i915->drm, "Could not initialize hdcp_message\n"); + drm_err(display->drm, "Could not initialize hdcp_message\n"); return ret; } -static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) +static void intel_hdcp_gsc_free_message(struct intel_display *display) { struct intel_hdcp_gsc_message *hdcp_message = - i915->display.hdcp.hdcp_message; + display->hdcp.hdcp_message; hdcp_message->hdcp_cmd_in = NULL; hdcp_message->hdcp_cmd_out = NULL; @@ -140,7 +142,7 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) kfree(hdcp_message); } -int intel_hdcp_gsc_init(struct drm_i915_private *i915) +int intel_hdcp_gsc_init(struct intel_display *display) { struct i915_hdcp_arbiter *data; int ret; @@ -149,20 +151,20 @@ int intel_hdcp_gsc_init(struct drm_i915_private *i915) if (!data) return -ENOMEM; - mutex_lock(&i915->display.hdcp.hdcp_mutex); - i915->display.hdcp.arbiter = data; - i915->display.hdcp.arbiter->hdcp_dev = i915->drm.dev; - i915->display.hdcp.arbiter->ops = &gsc_hdcp_ops; - ret = intel_hdcp_gsc_hdcp2_init(i915); - mutex_unlock(&i915->display.hdcp.hdcp_mutex); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = data; + display->hdcp.arbiter->hdcp_dev = display->drm->dev; + display->hdcp.arbiter->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(display); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } -void intel_hdcp_gsc_fini(struct drm_i915_private *i915) +void intel_hdcp_gsc_fini(struct intel_display *display) { - intel_hdcp_gsc_free_message(i915); - kfree(i915->display.hdcp.arbiter); + intel_hdcp_gsc_free_message(display); + kfree(display->hdcp.arbiter); } static int intel_gsc_send_sync(struct 
drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h index 5f610df61cc9..5695a5e4f609 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -10,14 +10,15 @@ #include <linux/types.h> struct drm_i915_private; +struct intel_display; struct intel_hdcp_gsc_message; -bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); +bool intel_hdcp_gsc_cs_required(struct intel_display *display); ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len); -int intel_hdcp_gsc_init(struct drm_i915_private *i915); -void intel_hdcp_gsc_fini(struct drm_i915_private *i915); -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); +int intel_hdcp_gsc_init(struct intel_display *display); +void intel_hdcp_gsc_fini(struct intel_display *display); +bool intel_hdcp_gsc_check_status(struct intel_display *display); #endif /* __INTEL_HDCP_GCS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c index 35bdb532bbb3..129104fa9b16 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c @@ -46,12 +46,12 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, (u8 *)&session_init_out, sizeof(session_init_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_INITIATE_HDCP2_SESSION, session_init_out.header.status); return -EIO; @@ -108,12 +108,12 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev, (u8 *)&verify_rxcert_out, sizeof(verify_rxcert_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); return byte; } if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_VERIFY_RECEIVER_CERT, verify_rxcert_out.header.status); return -EIO; @@ -171,12 +171,12 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data, (u8 *)&send_hprime_out, sizeof(send_hprime_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); return -EIO; } @@ -222,12 +222,12 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat (u8 *)&pairing_info_out, sizeof(pairing_info_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); return byte; } if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", WIRED_AKE_SEND_PAIRING_INFO, pairing_info_out.header.status); return -EIO; @@ -269,12 +269,12 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev, byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), (u8 *)&lc_init_out, sizeof(lc_init_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); return -EIO; } @@ -323,12 +323,12 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data, (u8 *)&verify_lprime_out, sizeof(verify_lprime_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_VALIDATE_LOCALITY, verify_lprime_out.header.status); return -EIO; @@ -369,12 +369,12 @@ int intel_hdcp_gsc_get_session_key(struct device *dev, byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), (u8 *)&get_skey_out, sizeof(get_skey_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_GET_SESSION_KEY, get_skey_out.header.status); return -EIO; } @@ -435,12 +435,12 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev, (u8 *)&verify_repeater_out, sizeof(verify_repeater_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_VERIFY_REPEATER, verify_repeater_out.header.status); return -EIO; @@ -504,12 +504,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev, sizeof(verify_mprime_out)); kfree(verify_mprime_in); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", WIRED_REPEATER_AUTH_STREAM_REQ, verify_mprime_out.header.status); return -EIO; @@ -552,12 +552,12 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev, (u8 *)&enable_auth_out, sizeof(enable_auth_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_ENABLE_AUTH, enable_auth_out.header.status); return -EIO; } @@ -599,12 +599,12 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data) (u8 *)&session_close_out, sizeof(session_close_out)); if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", + drm_dbg_kms(display->drm, "Session Close Failed. status: 0x%X\n", session_close_out.header.status); return -EIO; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h index ce199d6f6232..2d597f27e931 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h @@ -22,11 +22,12 @@ struct hdcp2_ske_send_eks; struct hdcp2_rep_send_receiverid_list; struct hdcp2_rep_send_ack; struct hdcp2_rep_stream_ready; +struct intel_display; ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len); -bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); +bool intel_hdcp_gsc_check_status(struct intel_display *display); int intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_init *ake_data); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_shim.h b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h new file mode 100644 index 000000000000..abf9ae2f4ada --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_HDCP_SHIM_H__ +#define __INTEL_HDCP_SHIM_H__ + +#include <linux/types.h> + +#include <drm/intel/i915_hdcp_interface.h> + +enum transcoder; +struct intel_connector; +struct intel_digital_port; + +enum check_link_response { + HDCP_LINK_PROTECTED = 0, + HDCP_TOPOLOGY_CHANGE, + HDCP_LINK_INTEGRITY_FAILURE, + HDCP_REAUTH_REQUEST +}; + +/* + * This structure serves as a translation layer between the generic HDCP code + * and the bus-specific code. What that means is that HDCP over HDMI differs + * from HDCP over DP, so to account for these differences, we need to + * communicate with the receiver through this shim. + * + * For completeness, the 2 buses differ in the following ways: + * - DP AUX vs. DDC + * HDCP registers on the receiver are set via DP AUX for DP, and + * they are set via DDC for HDMI. + * - Receiver register offsets + * The offsets of the registers are different for DP vs. HDMI + * - Receiver register masks/offsets + * For instance, the ready bit for the KSV fifo is in a different + * place on DP vs HDMI + * - Receiver register names + * Seriously. 
In the DP spec, the 16-bit register containing + * downstream information is called BINFO, on HDMI it's called + * BSTATUS. To confuse matters further, DP has a BSTATUS register + * with a completely different definition. + * - KSV FIFO + * On HDMI, the ksv fifo is read all at once, whereas on DP it must + * be read 3 keys at a time + * - Aksv output + * Since Aksv is hidden in hardware, there are different procedures + * to send it over DP AUX vs DDC + */ +struct intel_hdcp_shim { + /* Outputs the transmitter's An and Aksv values to the receiver. */ + int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an); + + /* Reads the receiver's key selection vector */ + int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv); + + /* + * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The + * definitions are the same in the respective specs, but the names are + * different. Call it BSTATUS since that's the name the HDMI spec + * uses and it was there first. + */ + int (*read_bstatus)(struct intel_digital_port *dig_port, + u8 *bstatus); + + /* Determines whether a repeater is present downstream */ + int (*repeater_present)(struct intel_digital_port *dig_port, + bool *repeater_present); + + /* Reads the receiver's Ri' value */ + int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri); + + /* Determines if the receiver's KSV FIFO is ready for consumption */ + int (*read_ksv_ready)(struct intel_digital_port *dig_port, + bool *ksv_ready); + + /* Reads the ksv fifo for num_downstream devices */ + int (*read_ksv_fifo)(struct intel_digital_port *dig_port, + int num_downstream, u8 *ksv_fifo); + + /* Reads a 32-bit part of V' from the receiver */ + int (*read_v_prime_part)(struct intel_digital_port *dig_port, + int i, u32 *part); + + /* Enables HDCP signalling on the port */ + int (*toggle_signalling)(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable); + + /* Enable/Disable stream encryption on DP MST Transport Link */ + int (*stream_encryption)(struct intel_connector *connector, + bool enable); + + /* Ensures the link is still protected */ + bool (*check_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); + + /* Detects panel's hdcp capability. This is optional for HDMI. */ + int (*hdcp_get_capability)(struct intel_digital_port *dig_port, + bool *hdcp_capable); + + /* HDCP adaptation (DP/HDMI) required on the port */ + enum hdcp_wired_protocol protocol; + + /* Detects whether sink is HDCP2.2 capable */ + int (*hdcp_2_2_get_capability)(struct intel_connector *connector, + bool *capable); + + /* Write HDCP2.2 messages */ + int (*write_2_2_msg)(struct intel_connector *connector, + void *buf, size_t size); + + /* Read HDCP2.2 messages */ + int (*read_2_2_msg)(struct intel_connector *connector, + u8 msg_id, void *buf, size_t size); + + /* + * Implementation of DP HDCP2.2 Errata for the communication of stream + * type to Receivers. In DP HDCP2.2 Stream type is one of the inputs to + * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */ + int (*config_stream_type)(struct intel_connector *connector, + bool is_repeater, u8 type); + + /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */ + int (*stream_2_2_encryption)(struct intel_connector *connector, + bool enable); + + /* HDCP2.2 Link Integrity Check */ + int (*check_2_2_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); + + /* HDCP remote sink cap */ + int (*get_remote_hdcp_capability)(struct intel_connector *connector, + bool *hdcp_capable, bool *hdcp2_capable); +}; + +#endif /* __INTEL_HDCP_SHIM_H__ */
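The shim comment above boils down to one split: the same HDCP state machine reads its receiver registers over DP AUX on DisplayPort and over DDC on HDMI, at different offsets. A hedged sketch of what a read_bstatus() pair might look like for the two buses; the helper names and the plumbing around them are hypothetical, while DRM_HDCP_DDC_ADDR, DRM_HDCP_DDC_BSTATUS and DP_AUX_HDCP_BSTATUS are the real offsets from drm_hdcp.h:

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp.h>

/* HDMI flavour: BSTATUS is read over DDC from the HDCP slave address. */
static int sketch_hdmi_read_bstatus(struct i2c_adapter *ddc, u8 *bstatus)
{
	u8 offset = DRM_HDCP_DDC_BSTATUS;
	struct i2c_msg msgs[] = {
		{ .addr = DRM_HDCP_DDC_ADDR, .flags = 0,
		  .len = 1, .buf = &offset, },
		{ .addr = DRM_HDCP_DDC_ADDR, .flags = I2C_M_RD,
		  .len = 2, .buf = bstatus, }, /* 16-bit BSTATUS on HDMI */
	};

	return i2c_transfer(ddc, msgs, ARRAY_SIZE(msgs)) == ARRAY_SIZE(msgs) ?
		0 : -EIO;
}

/* DP flavour: the status byte lives in the DPCD and is read over AUX. */
static int sketch_dp_read_bstatus(struct drm_dp_aux *aux, u8 *bstatus)
{
	ssize_t ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BSTATUS, bstatus, 1);

	return ret == 1 ? 0 : -EIO;
}

The driver's real hooks additionally take the intel_digital_port and handle CP_IRQ and timeout policy; this stripped-down pair only shows the bus split the comment describes.

diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index cd9ee171e0df..c6ce6bb88d7c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -38,8 +38,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include <drm/intel/intel_lpe_audio.h> +#include <media/cec-notifier.h> + #include "g4x_hdmi.h" #include "i915_drv.h" #include "i915_reg.h" @@ -55,9 +58,11 @@ #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" +#include "intel_hdcp_shim.h" #include "intel_hdmi.h" #include "intel_lspcon.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_snps_phy.h" static void @@ -1207,6 +1212,30 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, &crtc_state->infoframes.hdmi); } +void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(encoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, + crtc_state->cpu_transcoder); + u32 val = intel_de_read(display, reg); + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) == 0 && + (val & VIDEO_DIP_ENABLE_DRM_GLK) == 0) + return; + + val &= ~(VIDEO_DIP_ENABLE_DRM_GLK); + + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); + + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_DRM, + &crtc_state->infoframes.drm); +} + static void hsw_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -1310,8 +1339,8 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port, memcpy(&write_buf[1], buffer, size); msg.addr = DRM_HDCP_DDC_ADDR; - msg.flags = 0, - msg.len = size + 1, + msg.flags = 0; + msg.len = size + 1; msg.buf = write_buf; ret = i2c_transfer(ddc, &msg, 1); @@ -2053,7 +2082,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return status; } - return intel_mode_valid_max_plane_size(dev_priv, mode, false); + return intel_mode_valid_max_plane_size(dev_priv, mode, 1); } bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, @@ -2913,7 +2942,6 @@ static struct intel_encoder * get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; for_each_intel_encoder(display->drm, other) { @@ -2927,7 +2955,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) connector = enc_to_dig_port(other)->hdmi.attached_connector; - if (connector && connector->base.ddc == intel_gmbus_get_adapter(i915, ddc_pin)) + if (connector && connector->base.ddc == intel_gmbus_get_adapter(display, ddc_pin)) return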
other; } @@ -2937,7 +2965,6 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; const char *source; u8 ddc_pin; @@ -2950,7 +2977,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) source = "platform default"; } - if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) { + if (!intel_gmbus_is_valid_pin(display, ddc_pin)) { drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n", encoder->base.base.id, encoder->base.name, ddc_pin); @@ -3023,7 +3050,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; struct cec_connector_info conn_info; u8 ddc_pin; @@ -3048,7 +3074,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, drm_connector_init_with_ddc(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, - intel_gmbus_get_adapter(dev_priv, ddc_pin)); + intel_gmbus_get_adapter(display, ddc_pin)); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); @@ -3073,7 +3099,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, intel_connector_attach_encoder(intel_connector, intel_encoder); intel_hdmi->attached_connector = intel_connector; - if (is_hdcp_supported(dev_priv, port)) { + if (is_hdcp_supported(display, port)) { int ret = intel_hdcp_init(intel_connector, dig_port, &intel_hdmi_hdcp_shim); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 9b97623665c5..466f48df8a74 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -42,6 +42,9 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, u32 intel_hdmi_infoframe_enable(unsigned int type); void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); +void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); void intel_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index d9ec349f3c8c..a013b0e0ef54 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -21,8 +21,11 @@ * IN THE SOFTWARE. 
*/ +#include <linux/debugfs.h> #include <linux/kernel.h> +#include <drm/drm_probe_helper.h> + #include "i915_drv.h" #include "i915_irq.h" #include "intel_display_power.h" diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 2c4e946d5575..cb64c6f0ad1b 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -556,6 +556,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; @@ -589,11 +590,12 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_ICP) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { + struct intel_display *display = &dev_priv->display; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -625,7 +627,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) - intel_gmbus_irq_handler(dev_priv); + intel_gmbus_irq_handler(display); } void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) @@ -849,10 +851,11 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - else - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + /* + * We reduce the value to 250us to be able to detect SHPD when an external display + * is connected. This is also expected of us as stated in DP1.4a Table 3-4. + */ + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -1060,6 +1063,10 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915) enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + /* + * Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the + * SHPD_FILTER_CNT value should be. 
+ */ intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); mtp_hpd_invert(i915); diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index e7a9b860fac6..c87cd1d16d0a 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -26,7 +26,6 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, struct intel_link_bw_limits *limits) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe; limits->force_fec_pipes = 0; @@ -34,7 +33,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state, for_each_pipe(display, pipe) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, - intel_crtc_for_pipe(i915, pipe)); + intel_crtc_for_pipe(display, pipe)); if (state->base.duplicated && crtc_state) { limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index fb4ed9f7855b..6d7637ad980a 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -37,6 +37,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -51,6 +52,7 @@ #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "intel_pps_regs.h" /* Private structure for the integrated LVDS support */ @@ -263,7 +265,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, temp |= LVDS_PIPE_SEL(pipe); } - /* set the corresponsding LVDS_BORDER bit */ + /* set the corresponding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; temp |= crtc_state->gmch_pfit.lvds_border_bits; @@ -899,7 +901,7 @@ void intel_lvds_init(struct drm_i915_private *i915) drm_connector_init_with_ddc(&i915->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS, - intel_gmbus_get_adapter(i915, ddc_pin)); + intel_gmbus_get_adapter(display, ddc_pin)); drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 72694dde3c22..2c8668b1ebae 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_vblank.h> #include "i915_drv.h" #include "i915_reg.h" @@ -221,6 +222,7 @@ static u8 get_transcoder_pipes(struct drm_i915_private *i915, static void get_portsync_pipes(struct intel_crtc *crtc, u8 *master_pipe_mask, u8 *slave_pipes_mask) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -243,7 +245,7 @@ static void get_portsync_pipes(struct intel_crtc *crtc, *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder)); drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask)); - master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1); + master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1); master_crtc_state = to_intel_crtc_state(master_crtc->base.state); *slave_pipes_mask = get_transcoder_pipes(i915, 
master_crtc_state->sync_mode_slaves_mask); } @@ -375,6 +377,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state static void intel_sanitize_plane_mapping(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_crtc *crtc; if (DISPLAY_VER(i915) >= 4) @@ -396,7 +399,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915) "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); - plane_crtc = intel_crtc_for_pipe(i915, pipe); + plane_crtc = intel_crtc_for_pipe(display, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -490,8 +493,8 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc, } /* Disable any background color/etc. set by the BIOS */ - intel_color_commit_noarm(crtc_state); - intel_color_commit_arm(crtc_state); + intel_color_commit_noarm(NULL, crtc_state); + intel_color_commit_arm(NULL, crtc_state); } if (!crtc_state->hw.active || @@ -662,6 +665,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_plane *plane; struct intel_crtc *crtc; @@ -674,7 +678,7 @@ static void readout_plane_state(struct drm_i915_private *i915) visible = plane->get_hw_state(plane, &pipe); - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); @@ -695,6 +699,7 @@ static void readout_plane_state(struct drm_i915_private *i915) static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = @@ -743,7 +748,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; @@ -955,6 +960,7 @@ static void intel_early_display_was(struct drm_i915_private *i915) void intel_modeset_setup_hw_state(struct drm_i915_private *i915, struct drm_modeset_acquire_ctx *ctx) { + struct intel_display *display = &i915->display; struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; @@ -982,7 +988,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, drm_crtc_vblank_reset(&crtc->base); if (crtc_state->hw.active) { - intel_dmc_enable_pipe(i915, crtc->pipe); + intel_dmc_enable_pipe(display, crtc->pipe); intel_crtc_vblank_on(crtc_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index 3491db5cad31..bc70e72ccc2e 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -27,6 +27,7 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_display *display = to_intel_display(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_dbg_kms(&i915->drm, 
"[CONNECTOR:%d:%s]\n", @@ -35,29 +36,29 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta if (connector->get_hw_state(connector)) { struct intel_encoder *encoder = intel_attached_encoder(connector); - I915_STATE_WARN(i915, !crtc_state, - "connector enabled without attached crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state, + "connector enabled without attached crtc\n"); if (!crtc_state) return; - I915_STATE_WARN(i915, !crtc_state->hw.active, - "connector is active, but attached crtc isn't\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state->hw.active, + "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) return; - I915_STATE_WARN(i915, - conn_state->best_encoder != &encoder->base, - "atomic encoder doesn't match attached encoder\n"); + INTEL_DISPLAY_STATE_WARN(display, + conn_state->best_encoder != &encoder->base, + "atomic encoder doesn't match attached encoder\n"); - I915_STATE_WARN(i915, conn_state->crtc != encoder->base.crtc, - "attached encoder crtc differs from connector crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, conn_state->crtc != encoder->base.crtc, + "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(i915, crtc_state && crtc_state->hw.active, - "attached crtc is active, but connector isn't\n"); - I915_STATE_WARN(i915, !crtc_state && conn_state->best_encoder, - "best encoder set without crtc!\n"); + INTEL_DISPLAY_STATE_WARN(display, crtc_state && crtc_state->hw.active, + "attached crtc is active, but connector isn't\n"); + INTEL_DISPLAY_STATE_WARN(display, !crtc_state && conn_state->best_encoder, + "best encoder set without crtc!\n"); } } @@ -65,6 +66,7 @@ static void verify_connector_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_connector *connector; const struct drm_connector_state *new_conn_state; int i; @@ -81,8 +83,8 @@ verify_connector_state(struct intel_atomic_state *state, intel_connector_verify_state(crtc_state, new_conn_state); - I915_STATE_WARN(to_i915(connector->dev), new_conn_state->best_encoder != encoder, - "connector's atomic encoder doesn't match legacy encoder\n"); + INTEL_DISPLAY_STATE_WARN(display, new_conn_state->best_encoder != encoder, + "connector's atomic encoder doesn't match legacy encoder\n"); } } @@ -109,6 +111,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s static void verify_encoder_state(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_encoder *encoder; struct drm_connector *connector; @@ -134,25 +137,25 @@ verify_encoder_state(struct intel_atomic_state *state) found = true; enabled = true; - I915_STATE_WARN(i915, - new_conn_state->crtc != encoder->base.crtc, - "connector's crtc doesn't match encoder crtc\n"); + INTEL_DISPLAY_STATE_WARN(display, + new_conn_state->crtc != encoder->base.crtc, + "connector's crtc doesn't match encoder crtc\n"); } if (!found) continue; - I915_STATE_WARN(i915, !!encoder->base.crtc != enabled, - "encoder's enabled state mismatch (expected %i, found %i)\n", - !!encoder->base.crtc, enabled); + INTEL_DISPLAY_STATE_WARN(display, !!encoder->base.crtc != enabled, + "encoder's enabled state mismatch (expected %i, found %i)\n", + !!encoder->base.crtc, enabled); if (!encoder->base.crtc) { bool active; active = encoder->get_hw_state(encoder, &pipe); - 
I915_STATE_WARN(i915, active, - "encoder detached but still enabled on pipe %c.\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, active, + "encoder detached but still enabled on pipe %c.\n", + pipe_name(pipe)); } } } @@ -161,8 +164,8 @@ static void verify_crtc_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *i915 = to_i915(dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_crtc_state *sw_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_crtc_state *hw_crtc_state; @@ -173,7 +176,7 @@ verify_crtc_state(struct intel_atomic_state *state, if (!hw_crtc_state) return; - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, + drm_dbg_kms(display->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); hw_crtc_state->hw.enable = sw_crtc_state->hw.enable; @@ -184,30 +187,30 @@ verify_crtc_state(struct intel_atomic_state *state, if (IS_I830(i915) && hw_crtc_state->hw.active) hw_crtc_state->hw.active = sw_crtc_state->hw.active; - I915_STATE_WARN(i915, - sw_crtc_state->hw.active != hw_crtc_state->hw.active, - "crtc active state doesn't match with hw state (expected %i, found %i)\n", - sw_crtc_state->hw.active, hw_crtc_state->hw.active); + INTEL_DISPLAY_STATE_WARN(display, + sw_crtc_state->hw.active != hw_crtc_state->hw.active, + "crtc active state doesn't match with hw state (expected %i, found %i)\n", + sw_crtc_state->hw.active, hw_crtc_state->hw.active); - I915_STATE_WARN(i915, crtc->active != sw_crtc_state->hw.active, - "transitional active state does not match atomic hw state (expected %i, found %i)\n", - sw_crtc_state->hw.active, crtc->active); + INTEL_DISPLAY_STATE_WARN(display, crtc->active != sw_crtc_state->hw.active, + "transitional active state does not match atomic hw state (expected %i, found %i)\n", + sw_crtc_state->hw.active, crtc->active); primary_crtc = intel_primary_crtc(sw_crtc_state); - for_each_encoder_on_crtc(dev, &primary_crtc->base, encoder) { + for_each_encoder_on_crtc(display->drm, &primary_crtc->base, encoder) { enum pipe pipe; bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(i915, active != sw_crtc_state->hw.active, - "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, - sw_crtc_state->hw.active); + INTEL_DISPLAY_STATE_WARN(display, active != sw_crtc_state->hw.active, + "[ENCODER:%i] active %i with crtc active %i\n", + encoder->base.base.id, active, + sw_crtc_state->hw.active); - I915_STATE_WARN(i915, active && primary_crtc->pipe != pipe, - "Encoder connected to wrong pipe %c\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, active && primary_crtc->pipe != pipe, + "Encoder connected to wrong pipe %c\n", + pipe_name(pipe)); if (active) intel_encoder_get_config(encoder, hw_crtc_state); @@ -220,7 +223,7 @@ verify_crtc_state(struct intel_atomic_state *state, if (!intel_pipe_config_compare(sw_crtc_state, hw_crtc_state, false)) { - I915_STATE_WARN(i915, 1, "pipe state doesn't match!\n"); + INTEL_DISPLAY_STATE_WARN(display, 1, "pipe state doesn't match!\n"); intel_crtc_state_dump(hw_crtc_state, NULL, "hw state"); intel_crtc_state_dump(sw_crtc_state, NULL, "sw state"); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index ff11836459de..0eaa6cd6fe80 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ 
b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -26,6 +26,7 @@ */ #include <linux/acpi.h> +#include <linux/debugfs.h> #include <linux/dmi.h> #include <acpi/video.h> diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 06b1122ec13e..2ec14096ba9c 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -294,7 +294,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); if (vma) - frontbuffer = intel_frontbuffer_get(vma->obj); + frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj)); intel_frontbuffer_track(overlay->frontbuffer, frontbuffer, INTEL_FRONTBUFFER_OVERLAY(pipe)); @@ -1457,18 +1457,19 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -struct intel_overlay_error_state { +struct intel_overlay_snapshot { struct overlay_registers regs; unsigned long base; u32 dovsta; u32 isr; }; -struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) +struct intel_overlay_snapshot * +intel_overlay_snapshot_capture(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_overlay *overlay = dev_priv->display.overlay; - struct intel_overlay_error_state *error; + struct intel_overlay_snapshot *error; if (!overlay || !overlay->active) return NULL; @@ -1487,9 +1488,12 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) } void -intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error) +intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p) { + if (!error) + return; + drm_printf(p, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", error->dovsta, error->isr); drm_printf(p, " Register file at 0x%08lx:\n", error->base); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h index f28a09c062d0..eafac24d1de8 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.h +++ b/drivers/gpu/drm/i915/display/intel_overlay.h @@ -6,12 +6,15 @@ #ifndef __INTEL_OVERLAY_H__ #define __INTEL_OVERLAY_H__ +#include <linux/types.h> + struct drm_device; struct drm_file; struct drm_i915_private; struct drm_printer; +struct intel_display; struct intel_overlay; -struct intel_overlay_error_state; +struct intel_overlay_snapshot; #ifdef I915 void intel_overlay_setup(struct drm_i915_private *dev_priv); @@ -22,10 +25,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); -struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); -void intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error); #else static inline void intel_overlay_setup(struct drm_i915_private *dev_priv) { @@ -50,13 +49,21 @@ static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, static inline void intel_overlay_reset(struct drm_i915_private *dev_priv) { } -static inline struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) +#endif + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && defined(I915) +struct intel_overlay_snapshot * 
+intel_overlay_snapshot_capture(struct intel_display *display); +void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p); +#else +static inline struct intel_overlay_snapshot * +intel_overlay_snapshot_capture(struct intel_display *display) { return NULL; } -static inline void intel_overlay_print_error_state(struct drm_printer *p, - struct intel_overlay_error_state *error) +static inline void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error, + struct drm_printer *p) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 71454ddef20f..313bd3f35ace 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -33,22 +33,19 @@ #include <drm/drm_edid.h> -#include "i915_reg.h" +#include "i915_drv.h" #include "intel_backlight.h" #include "intel_connector.h" -#include "intel_de.h" +#include "intel_display_core.h" #include "intel_display_driver.h" #include "intel_display_types.h" #include "intel_drrs.h" -#include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" #include "intel_vrr.h" -bool intel_panel_use_ssc(struct drm_i915_private *i915) +bool intel_panel_use_ssc(struct intel_display *display) { - struct intel_display *display = &i915->display; - if (display->params.panel_use_ssc >= 0) return display->params.panel_use_ssc != 0; return display->vbt.lvds_use_ssc && @@ -252,7 +249,7 @@ int intel_panel_compute_config(struct intel_connector *connector, static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *preferred_mode = intel_panel_preferred_fixed_mode(connector); struct drm_display_mode *mode, *next; @@ -261,7 +258,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect if (!is_alt_fixed_mode(mode, preferred_mode)) continue; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); @@ -272,7 +269,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_mode *scan, *fixed_mode = NULL; if (list_empty(&connector->base.probed_modes)) @@ -290,7 +287,7 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto fixed_mode = list_first_entry(&connector->base.probed_modes, typeof(*fixed_mode), head); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? 
"preferred" : "first", @@ -303,16 +300,16 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto static void intel_panel_destroy_probed_modes(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); list_del(&mode->head); - drm_mode_destroy(&i915->drm, mode); + drm_mode_destroy(display->drm, mode); } } @@ -329,7 +326,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, struct drm_display_mode *fixed_mode, const char *type) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct drm_display_info *info = &connector->base.display_info; if (!fixed_mode) @@ -340,7 +337,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, info->width_mm = fixed_mode->width_mm; info->height_mm = fixed_mode->height_mm; - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, type, DRM_MODE_ARG(fixed_mode)); @@ -349,7 +346,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector, void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *mode; mode = connector->panel.vbt.lfp_vbt_mode; @@ -357,13 +354,13 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) return; intel_panel_add_fixed_mode(connector, - drm_mode_duplicate(&i915->drm, mode), + drm_mode_duplicate(display->drm, mode), "VBT LFP"); } void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); const struct drm_display_mode *mode; mode = connector->panel.vbt.sdvo_lvds_vbt_mode; @@ -371,7 +368,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) return; intel_panel_add_fixed_mode(connector, - drm_mode_duplicate(&i915->drm, mode), + drm_mode_duplicate(display->drm, mode), "VBT SDVO"); } @@ -383,301 +380,6 @@ void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector, "current (BIOS)"); } -/* adjusted_mode has been preset to be the panel's fixed mode */ -static int pch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - int x, y, width, height; - - /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_src_h && - crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) - return 0; - - switch (conn_state->scaling_mode) { - case DRM_MODE_SCALE_CENTER: - width = pipe_src_w; - height = pipe_src_h; - x = 
(adjusted_mode->crtc_hdisplay - width + 1)/2; - y = (adjusted_mode->crtc_vdisplay - height + 1)/2; - break; - - case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the aspect ratio */ - { - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - if (scaled_width > scaled_height) { /* pillar */ - width = scaled_height / pipe_src_h; - if (width & 1) - width++; - x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; - y = 0; - height = adjusted_mode->crtc_vdisplay; - } else if (scaled_width < scaled_height) { /* letter */ - height = scaled_width / pipe_src_w; - if (height & 1) - height++; - y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; - x = 0; - width = adjusted_mode->crtc_hdisplay; - } else { - x = y = 0; - width = adjusted_mode->crtc_hdisplay; - height = adjusted_mode->crtc_vdisplay; - } - } - break; - - case DRM_MODE_SCALE_NONE: - WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w); - WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h); - fallthrough; - case DRM_MODE_SCALE_FULLSCREEN: - x = y = 0; - width = adjusted_mode->crtc_hdisplay; - height = adjusted_mode->crtc_vdisplay; - break; - - default: - MISSING_CASE(conn_state->scaling_mode); - return -EINVAL; - } - - drm_rect_init(&crtc_state->pch_pfit.dst, - x, y, width, height); - crtc_state->pch_pfit.enabled = true; - - return 0; -} - -static void -centre_horizontally(struct drm_display_mode *adjusted_mode, - int width) -{ - u32 border, sync_pos, blank_width, sync_width; - - /* keep the hsync and hblank widths constant */ - sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; - blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; - sync_pos = (blank_width - sync_width + 1) / 2; - - border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; - border += border & 1; /* make the border even */ - - adjusted_mode->crtc_hdisplay = width; - adjusted_mode->crtc_hblank_start = width + border; - adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; - - adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; - adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; -} - -static void -centre_vertically(struct drm_display_mode *adjusted_mode, - int height) -{ - u32 border, sync_pos, blank_width, sync_width; - - /* keep the vsync and vblank widths constant */ - sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; - blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; - sync_pos = (blank_width - sync_width + 1) / 2; - - border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; - - adjusted_mode->crtc_vdisplay = height; - adjusted_mode->crtc_vblank_start = height + border; - adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; - - adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; - adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; -} - -static u32 panel_fitter_scaling(u32 source, u32 target) -{ - /* - * Floating point operation is not supported. So the FACTOR - * is defined, which can avoid the floating point computation - * when calculating the panel ratio. 
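The ACCURACY/FACTOR comment above (this code moves verbatim into intel_pfit.c further down in the patch) is easiest to follow with numbers. Below is a minimal standalone sketch of the same 12-bit fixed-point ratio; panel_fitter_scaling() is essentially verbatim from the hunk (u32 spelled as uint32_t), everything else is scaffolding with made-up sizes.

#include <stdio.h>
#include <stdint.h>

#define ACCURACY 12
#define FACTOR (1u << ACCURACY)

static uint32_t panel_fitter_scaling(uint32_t source, uint32_t target)
{
    /* source/target expressed as a 16.12-style integer ratio, no FPU */
    uint32_t ratio = source * FACTOR / target;

    return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
    /* squeeze 1200 source lines into 1080: expect roughly 1.111 * 4096 */
    uint32_t bits = panel_fitter_scaling(1200, 1080);

    printf("scale = %u (%.4f)\n", bits, (double)bits / FACTOR);
    return 0;
}

With 1200/1080 this prints 4551, i.e. 4551/4096 ≈ 1.1111, the value programmed into the PFIT_PGM_RATIO fields.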
- */ -#define ACCURACY 12 -#define FACTOR (1 << ACCURACY) - u32 ratio = source * FACTOR / target; - return (FACTOR * ratio + FACTOR/2) / FACTOR; -} - -static void i965_scale_aspect(struct intel_crtc_state *crtc_state, - u32 *pfit_control) -{ - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - - /* 965+ is easy, it does everything in hw */ - if (scaled_width > scaled_height) - *pfit_control |= PFIT_ENABLE | - PFIT_SCALING_PILLAR; - else if (scaled_width < scaled_height) - *pfit_control |= PFIT_ENABLE | - PFIT_SCALING_LETTER; - else if (adjusted_mode->crtc_hdisplay != pipe_src_w) - *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; -} - -static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, - u32 *pfit_control, u32 *pfit_pgm_ratios, - u32 *border) -{ - struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; - u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; - u32 bits; - - /* - * For earlier chips we have to calculate the scaling - * ratio by hand and program it into the - * PFIT_PGM_RATIO register - */ - if (scaled_width > scaled_height) { /* pillar */ - centre_horizontally(adjusted_mode, - scaled_height / pipe_src_h); - - *border = LVDS_BORDER_ENABLE; - if (pipe_src_h != adjusted_mode->crtc_vdisplay) { - bits = panel_fitter_scaling(pipe_src_h, - adjusted_mode->crtc_vdisplay); - - *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | - PFIT_VERT_SCALE(bits)); - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } - } else if (scaled_width < scaled_height) { /* letter */ - centre_vertically(adjusted_mode, - scaled_width / pipe_src_w); - - *border = LVDS_BORDER_ENABLE; - if (pipe_src_w != adjusted_mode->crtc_hdisplay) { - bits = panel_fitter_scaling(pipe_src_w, - adjusted_mode->crtc_hdisplay); - - *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | - PFIT_VERT_SCALE(bits)); - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } - } else { - /* Aspects match, Let hw scale both directions */ - *pfit_control |= (PFIT_ENABLE | - PFIT_VERT_AUTO_SCALE | - PFIT_HORIZ_AUTO_SCALE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_INTERP_BILINEAR); - } -} - -static int gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; - struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); - int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); - - /* Native modes don't need fitting */ - if (adjusted_mode->crtc_hdisplay == pipe_src_w && - adjusted_mode->crtc_vdisplay == pipe_src_h) - goto out; - - switch (conn_state->scaling_mode) { - case DRM_MODE_SCALE_CENTER: - /* - * For centered modes, we have to calculate border widths & - * heights and modify the values programmed into the CRTC. 
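centre_horizontally()/centre_vertically(), removed here and re-added in intel_pfit.c below, implement exactly the border computation this comment describes: shrink the active area while keeping the sync and blank widths constant, so only the border moves. A runnable sketch of the horizontal case, with invented (not real panel) timings and a simplified struct in place of drm_display_mode:

#include <stdio.h>

struct timings {
    int hdisplay, hblank_start, hblank_end, hsync_start, hsync_end;
};

static void centre_horizontally(struct timings *t, int width)
{
    /* keep the hsync and hblank widths constant */
    int sync_width = t->hsync_end - t->hsync_start;
    int blank_width = t->hblank_end - t->hblank_start;
    int sync_pos = (blank_width - sync_width + 1) / 2;
    int border = (t->hdisplay - width + 1) / 2;

    border += border & 1; /* make the border even */

    t->hdisplay = width;
    t->hblank_start = width + border;
    t->hblank_end = t->hblank_start + blank_width;
    t->hsync_start = t->hblank_start + sync_pos;
    t->hsync_end = t->hsync_start + sync_width;
}

int main(void)
{
    /* made-up 1920-wide panel timing, centring a 1366-wide mode */
    struct timings t = { 1920, 1968, 2148, 2008, 2052 };

    centre_horizontally(&t, 1366);
    printf("hdisplay %d blank %d-%d sync %d-%d\n",
           t.hdisplay, t.hblank_start, t.hblank_end,
           t.hsync_start, t.hsync_end);
    return 0;
}

Here border comes out odd (277) and gets nudged to 278, which is the point of the `border & 1` line.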
- */ - centre_horizontally(adjusted_mode, pipe_src_w); - centre_vertically(adjusted_mode, pipe_src_h); - border = LVDS_BORDER_ENABLE; - break; - case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the aspect ratio */ - if (DISPLAY_VER(dev_priv) >= 4) - i965_scale_aspect(crtc_state, &pfit_control); - else - i9xx_scale_aspect(crtc_state, &pfit_control, - &pfit_pgm_ratios, &border); - break; - case DRM_MODE_SCALE_FULLSCREEN: - /* - * Full scaling, even if it changes the aspect ratio. - * Fortunately this is all done for us in hw. - */ - if (pipe_src_h != adjusted_mode->crtc_vdisplay || - pipe_src_w != adjusted_mode->crtc_hdisplay) { - pfit_control |= PFIT_ENABLE; - if (DISPLAY_VER(dev_priv) >= 4) - pfit_control |= PFIT_SCALING_AUTO; - else - pfit_control |= (PFIT_VERT_AUTO_SCALE | - PFIT_VERT_INTERP_BILINEAR | - PFIT_HORIZ_AUTO_SCALE | - PFIT_HORIZ_INTERP_BILINEAR); - } - break; - default: - MISSING_CASE(conn_state->scaling_mode); - return -EINVAL; - } - - /* 965+ wants fuzzy fitting */ - /* FIXME: handle multiple panels by failing gracefully */ - if (DISPLAY_VER(dev_priv) >= 4) - pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; - -out: - if ((pfit_control & PFIT_ENABLE) == 0) { - pfit_control = 0; - pfit_pgm_ratios = 0; - } - - /* Make sure pre-965 set dither correctly for 18bpp panels. */ - if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18) - pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; - - crtc_state->gmch_pfit.control = pfit_control; - crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; - crtc_state->gmch_pfit.lvds_border_bits = border; - - return 0; -} - -int intel_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - - if (HAS_GMCH(i915)) - return gmch_panel_fitting(crtc_state, conn_state); - else - return pch_panel_fitting(crtc_state, conn_state); -} - enum drm_connector_status intel_panel_detect(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 15a8c897b33f..b60d12322e5d 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -14,9 +14,9 @@ struct drm_connector; struct drm_connector_state; struct drm_display_mode; struct drm_edid; -struct drm_i915_private; struct intel_connector; struct intel_crtc_state; +struct intel_display; struct intel_encoder; void intel_panel_init_alloc(struct intel_connector *connector); @@ -25,7 +25,7 @@ int intel_panel_init(struct intel_connector *connector, void intel_panel_fini(struct intel_connector *connector); enum drm_connector_status intel_panel_detect(struct drm_connector *connector, bool force); -bool intel_panel_use_ssc(struct drm_i915_private *i915); +bool intel_panel_use_ssc(struct intel_display *display); const struct drm_display_mode * intel_panel_preferred_fixed_mode(struct intel_connector *connector); const struct drm_display_mode * @@ -42,8 +42,6 @@ enum drrs_type intel_panel_drrs_type(struct intel_connector *connector); enum drm_mode_status intel_panel_mode_valid(struct intel_connector *connector, const struct drm_display_mode *mode); -int intel_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); int intel_panel_compute_config(struct intel_connector *connector, struct drm_display_mode *adjusted_mode); void intel_panel_add_edid_fixed_modes(struct 
intel_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index f13ab680c2cf..4210de87a0a2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -39,58 +39,61 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t dp_reg) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; bool state; state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - I915_STATE_WARN(dev_priv, state && port_pipe == pipe, - "PCH DP %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH DP %c still using transcoder B\n", - port_name(port)); + INTEL_DISPLAY_STATE_WARN(display, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t hdmi_reg) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; bool state; state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - I915_STATE_WARN(dev_priv, state && port_pipe == pipe, - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH HDMI %c still using transcoder B\n", - port_name(port)); + INTEL_DISPLAY_STATE_WARN(display, + HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct intel_display *display = &dev_priv->display; enum pipe port_pipe; assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - I915_STATE_WARN(dev_priv, - intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && port_pipe == pipe, - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, + intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); - I915_STATE_WARN(dev_priv, - intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, + intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); /* PCH SDVOB multiplex with HDMIB */ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); @@ -101,14 +104,15 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct 
intel_display *display = &dev_priv->display; u32 val; bool enabled; - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); + val = intel_de_read(display, PCH_TRANSCONF(pipe)); enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(dev_priv, enabled, - "transcoder assertion failed, should be off on pipe %c but is still active\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); } static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 713cfba71475..84c55971e91a 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -491,6 +491,7 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_encoder *encoder; struct intel_shared_dpll *pll; int i; @@ -572,11 +573,11 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) if (has_panel) { final |= DREF_SSC_SOURCE_ENABLE; - if (intel_panel_use_ssc(dev_priv) && can_ssc) + if (intel_panel_use_ssc(display) && can_ssc) final |= DREF_SSC1_ENABLE; if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) + if (intel_panel_use_ssc(display) && can_ssc) final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; else final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; @@ -604,7 +605,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) val |= DREF_SSC_SOURCE_ENABLE; /* SSC must be turned on before enabling the CPU output */ - if (intel_panel_use_ssc(dev_priv) && can_ssc) { + if (intel_panel_use_ssc(display) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); val |= DREF_SSC1_ENABLE; } else { @@ -620,7 +621,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) { + if (intel_panel_use_ssc(display) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on eDP\n"); val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c new file mode 100644 index 000000000000..50861aa78a89 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pfit.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_display_core.h" +#include "intel_display_driver.h" +#include "intel_display_types.h" +#include "intel_lvds_regs.h" +#include "intel_pfit.h" + +static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; + int width = drm_rect_width(dst); + int height = drm_rect_height(dst); + int x = dst->x1; + int y = dst->y1; + + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE && + (y & 1 || height & 1)) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") misaligned for interlaced output\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + /* 
+ * "Restriction : When pipe scaling is enabled, the scaled + * output must equal the pipe active area, so Pipe active + * size = (2 * PF window position) + PF window size." + * + * The vertical direction seems more forgiving than the + * horizontal direction, but still has some issues so + * let's follow the same hard rule for both. + */ + if (adjusted_mode->crtc_hdisplay != 2 * x + width || + adjusted_mode->crtc_vdisplay != 2 * y + height) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") not centered\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + /* + * "Restriction : The X position must not be programmed + * to be 1 (28:16=0 0000 0000 0001b)." + */ + if (x == 1) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") badly positioned\n", + crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst)); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_src_size(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int max_src_w, max_src_h; + + if (DISPLAY_VER(display) >= 8) { + max_src_w = 4096; + max_src_h = 4096; + } else if (DISPLAY_VER(display) >= 7) { + /* + * PF0 7x5 capable + * PF1 3x3 capable (could be switched to 7x5 + * mode on HSW when PF2 unused) + * PF2 3x3 capable + * + * This assumes we use a 1:1 mapping between pipe and PF. + */ + max_src_w = crtc->pipe == PIPE_A ? 4096 : 2048; + max_src_h = 4096; + } else { + max_src_w = 4096; + max_src_h = 4096; + } + + if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] source size (%dx%d) exceeds pfit max (%dx%d)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, pipe_src_h, max_src_w, max_src_h); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_scaling(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int hscale, vscale, max_scale = 0x12000; /* 1.125 */ + struct drm_rect src; + + drm_rect_init(&src, 0, 0, pipe_src_w << 16, pipe_src_h << 16); + + hscale = drm_rect_calc_hscale(&src, dst, 0, max_scale); + if (hscale < 0) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) exceeds max (0x%x)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, drm_rect_width(dst), + max_scale); + return hscale; + } + + vscale = drm_rect_calc_vscale(&src, dst, 0, max_scale); + if (vscale < 0) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) exceeds max (0x%x)\n", + crtc->base.base.id, crtc->base.name, + pipe_src_h, drm_rect_height(dst), + max_scale); + return vscale; + } + + return 0; +} + +static int intel_pch_pfit_check_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (adjusted_mode->crtc_vdisplay < 7) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] 
vertical active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_vdisplay, 7); + return -EINVAL; + } + + return 0; +} + +static int intel_pch_pfit_check_cloning(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + /* + * The panel fitter is in the pipe and thus would affect every + * cloned output. The relevant properties (scaling mode, TV + * margins) are per-connector so we'd have to make sure each + * output sets them up identically. Seems like a very niche use + * case so let's just reject cloning entirely when pfit is used. + */ + if (crtc_state->uapi.encoder_mask && + !is_power_of_2(crtc_state->uapi.encoder_mask)) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] no pfit when cloning\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; + } + + return 0; +} + +/* adjusted_mode has been preset to be the panel's fixed mode */ +static int pch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + int ret, x, y, width, height; + + /* Native modes don't need fitting */ + if (adjusted_mode->crtc_hdisplay == pipe_src_w && + adjusted_mode->crtc_vdisplay == pipe_src_h && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) + return 0; + + switch (conn_state->scaling_mode) { + case DRM_MODE_SCALE_CENTER: + width = pipe_src_w; + height = pipe_src_h; + x = (adjusted_mode->crtc_hdisplay - width + 1)/2; + y = (adjusted_mode->crtc_vdisplay - height + 1)/2; + break; + + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + { + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + + if (scaled_width > scaled_height) { /* pillar */ + width = scaled_height / pipe_src_h; + if (width & 1) + width++; + x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; + y = 0; + height = adjusted_mode->crtc_vdisplay; + } else if (scaled_width < scaled_height) { /* letter */ + height = scaled_width / pipe_src_w; + if (height & 1) + height++; + y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; + x = 0; + width = adjusted_mode->crtc_hdisplay; + } else { + x = y = 0; + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + } + } + break; + + case DRM_MODE_SCALE_NONE: + WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w); + WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h); + fallthrough; + case DRM_MODE_SCALE_FULLSCREEN: + x = y = 0; + width = adjusted_mode->crtc_hdisplay; + height = adjusted_mode->crtc_vdisplay; + break; + + default: + MISSING_CASE(conn_state->scaling_mode); + return -EINVAL; + } + + drm_rect_init(&crtc_state->pch_pfit.dst, + x, y, width, height); + crtc_state->pch_pfit.enabled = true; + + /* + * SKL+ have unified scalers for pipes/planes so the + * checks are done in a single place for all scalers. 
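The "Pipe active size = (2 * PF window position) + PF window size" restriction quoted earlier, which intel_pch_pfit_check_dst_window() enforces and which pch_panel_fitting() invokes just below, reduces to a small arithmetic check. A standalone sketch with invented window and timing values:

#include <stdio.h>
#include <stdbool.h>

/* window must be centred in the active area, and x == 1 is rejected */
static bool pfit_window_ok(int hactive, int vactive,
                           int x, int y, int w, int h)
{
    if (hactive != 2 * x + w || vactive != 2 * y + h)
        return false; /* not centred: scaled output != pipe active area */
    if (x == 1)
        return false; /* "X position must not be programmed to be 1" */
    return true;
}

int main(void)
{
    /* 1920x1200 panel, 1920x1080 output letterboxed by 60 lines */
    printf("%d\n", pfit_window_ok(1920, 1200, 0, 60, 1920, 1080)); /* 1 */
    /* off-centre by 10 lines: 2*50 + 1080 != 1200, rejected */
    printf("%d\n", pfit_window_ok(1920, 1200, 0, 50, 1920, 1080)); /* 0 */
    return 0;
}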
+ */ + if (DISPLAY_VER(display) >= 9) + return 0; + + ret = intel_pch_pfit_check_dst_window(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_src_size(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_scaling(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_timings(crtc_state); + if (ret) + return ret; + + ret = intel_pch_pfit_check_cloning(crtc_state); + if (ret) + return ret; + + return 0; +} + +static void +centre_horizontally(struct drm_display_mode *adjusted_mode, + int width) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the hsync and hblank widths constant */ + sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; + blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; + border += border & 1; /* make the border even */ + + adjusted_mode->crtc_hdisplay = width; + adjusted_mode->crtc_hblank_start = width + border; + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; + + adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; + adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; +} + +static void +centre_vertically(struct drm_display_mode *adjusted_mode, + int height) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the vsync and vblank widths constant */ + sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; + blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; + + adjusted_mode->crtc_vdisplay = height; + adjusted_mode->crtc_vblank_start = height + border; + adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; + + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; + adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; +} + +static u32 panel_fitter_scaling(u32 source, u32 target) +{ + /* + * Floating point operation is not supported. So the FACTOR + * is defined, which can avoid the floating point computation + * when calculating the panel ratio. 
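intel_pch_pfit_check_scaling() above caps downscaling at 0x12000, i.e. 1.125 in 16.16 fixed point. A sketch of that limit, assuming drm_rect_calc_hscale() reduces to a plain 16.16 src/dst ratio for the full-rectangle case (the real helper also handles sub-pixel rects and returns -ERANGE on overflow):

#include <stdio.h>

#define PFIT_MAX_SCALE 0x12000 /* 1.125 in 16.16 fixed point */

static int check_downscale(int src, int dst)
{
    long scale = ((long)src << 16) / dst; /* 16.16 src/dst ratio */

    return scale <= PFIT_MAX_SCALE ? 0 : -1;
}

int main(void)
{
    printf("%d\n", check_downscale(2160, 1920)); /* exactly 1.125x: ok */
    printf("%d\n", check_downscale(2304, 1920)); /* 1.2x: rejected */
    return 0;
}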
+ */ +#define ACCURACY 12 +#define FACTOR (1 << ACCURACY) + u32 ratio = source * FACTOR / target; + return (FACTOR * ratio + FACTOR/2) / FACTOR; +} + +static void i965_scale_aspect(struct intel_crtc_state *crtc_state, + u32 *pfit_control) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + + /* 965+ is easy, it does everything in hw */ + if (scaled_width > scaled_height) + *pfit_control |= PFIT_ENABLE | + PFIT_SCALING_PILLAR; + else if (scaled_width < scaled_height) + *pfit_control |= PFIT_ENABLE | + PFIT_SCALING_LETTER; + else if (adjusted_mode->crtc_hdisplay != pipe_src_w) + *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; +} + +static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, + u32 *pfit_control, u32 *pfit_pgm_ratios, + u32 *border) +{ + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; + u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; + u32 bits; + + /* + * For earlier chips we have to calculate the scaling + * ratio by hand and program it into the + * PFIT_PGM_RATIO register + */ + if (scaled_width > scaled_height) { /* pillar */ + centre_horizontally(adjusted_mode, + scaled_height / pipe_src_h); + + *border = LVDS_BORDER_ENABLE; + if (pipe_src_h != adjusted_mode->crtc_vdisplay) { + bits = panel_fitter_scaling(pipe_src_h, + adjusted_mode->crtc_vdisplay); + + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } + } else if (scaled_width < scaled_height) { /* letter */ + centre_vertically(adjusted_mode, + scaled_width / pipe_src_w); + + *border = LVDS_BORDER_ENABLE; + if (pipe_src_w != adjusted_mode->crtc_hdisplay) { + bits = panel_fitter_scaling(pipe_src_w, + adjusted_mode->crtc_hdisplay); + + *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | + PFIT_VERT_SCALE(bits)); + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } + } else { + /* Aspects match, Let hw scale both directions */ + *pfit_control |= (PFIT_ENABLE | + PFIT_VERT_AUTO_SCALE | + PFIT_HORIZ_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_INTERP_BILINEAR); + } +} + +static int intel_gmch_pfit_check_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int min; + + if (DISPLAY_VER(display) >= 4) + min = 3; + else + min = 2; + + if (adjusted_mode->crtc_hdisplay < min) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] horizontal active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_hdisplay, min); + return -EINVAL; + } + + if (adjusted_mode->crtc_vdisplay < min) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] vertical active (%d) below minimum (%d) for pfit\n", + crtc->base.base.id, crtc->base.name, + adjusted_mode->crtc_vdisplay, min); + return -EINVAL; + } + + return 0; +} + +static int 
gmch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); + int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); + + /* Native modes don't need fitting */ + if (adjusted_mode->crtc_hdisplay == pipe_src_w && + adjusted_mode->crtc_vdisplay == pipe_src_h) + goto out; + + /* + * TODO: implement downscaling for i965+. Need to account + * for downscaling in intel_crtc_compute_pixel_rate(). + */ + if (adjusted_mode->crtc_hdisplay < pipe_src_w) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) not supported\n", + crtc->base.base.id, crtc->base.name, + pipe_src_w, adjusted_mode->crtc_hdisplay); + return -EINVAL; + } + if (adjusted_mode->crtc_vdisplay < pipe_src_h) { + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) not supported\n", + crtc->base.base.id, crtc->base.name, + pipe_src_h, adjusted_mode->crtc_vdisplay); + return -EINVAL; + } + + switch (conn_state->scaling_mode) { + case DRM_MODE_SCALE_CENTER: + /* + * For centered modes, we have to calculate border widths & + * heights and modify the values programmed into the CRTC. + */ + centre_horizontally(adjusted_mode, pipe_src_w); + centre_vertically(adjusted_mode, pipe_src_h); + border = LVDS_BORDER_ENABLE; + break; + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + if (DISPLAY_VER(display) >= 4) + i965_scale_aspect(crtc_state, &pfit_control); + else + i9xx_scale_aspect(crtc_state, &pfit_control, + &pfit_pgm_ratios, &border); + break; + case DRM_MODE_SCALE_FULLSCREEN: + /* + * Full scaling, even if it changes the aspect ratio. + * Fortunately this is all done for us in hw. + */ + if (pipe_src_h != adjusted_mode->crtc_vdisplay || + pipe_src_w != adjusted_mode->crtc_hdisplay) { + pfit_control |= PFIT_ENABLE; + if (DISPLAY_VER(display) >= 4) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (PFIT_VERT_AUTO_SCALE | + PFIT_VERT_INTERP_BILINEAR | + PFIT_HORIZ_AUTO_SCALE | + PFIT_HORIZ_INTERP_BILINEAR); + } + break; + default: + MISSING_CASE(conn_state->scaling_mode); + return -EINVAL; + } + + /* 965+ wants fuzzy fitting */ + /* FIXME: handle multiple panels by failing gracefully */ + if (DISPLAY_VER(display) >= 4) + pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; + +out: + if ((pfit_control & PFIT_ENABLE) == 0) { + pfit_control = 0; + pfit_pgm_ratios = 0; + } + + /* Make sure pre-965 set dither correctly for 18bpp panels. 
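Both i965_scale_aspect() and i9xx_scale_aspect() pick pillarbox versus letterbox by cross-multiplying rather than dividing: comparing crtc_hdisplay * pipe_src_h against pipe_src_w * crtc_vdisplay compares the two aspect ratios exactly, with no rounding. A standalone sketch of that decision:

#include <stdio.h>

static const char *fit_mode(int hdisplay, int vdisplay, int src_w, int src_h)
{
    /* hdisplay/vdisplay vs src_w/src_h, compared via cross products */
    unsigned int scaled_width = (unsigned int)hdisplay * src_h;
    unsigned int scaled_height = (unsigned int)src_w * vdisplay;

    if (scaled_width > scaled_height)
        return "pillarbox"; /* mode wider than source: pad left/right */
    if (scaled_width < scaled_height)
        return "letterbox"; /* mode taller than source: pad top/bottom */
    return "aspects match";
}

int main(void)
{
    printf("%s\n", fit_mode(1920, 1080, 1024, 768)); /* pillarbox */
    printf("%s\n", fit_mode(1024, 768, 1280, 720));  /* letterbox */
    printf("%s\n", fit_mode(1920, 1080, 1280, 720)); /* aspects match */
    return 0;
}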
*/ + if (DISPLAY_VER(display) < 4 && crtc_state->pipe_bpp == 18) + pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; + + crtc_state->gmch_pfit.control = pfit_control; + crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; + crtc_state->gmch_pfit.lvds_border_bits = border; + + if ((pfit_control & PFIT_ENABLE) == 0) + return 0; + + return intel_gmch_pfit_check_timings(crtc_state); +} + +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (HAS_GMCH(display)) + return gmch_panel_fitting(crtc_state, conn_state); + else + return pch_panel_fitting(crtc_state, conn_state); +} diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h new file mode 100644 index 000000000000..add8d78de2c9 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pfit.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __INTEL_PFIT_H__ +#define __INTEL_PFIT_H__ + +struct drm_connector_state; +struct intel_crtc_state; + +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + +#endif /* __INTEL_PFIT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 82ceede0b2b1..304da826dee1 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -32,6 +32,7 @@ #include "i915_reg.h" #include "intel_atomic.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_pipe_crc.h" #include "intel_pipe_crc_regs.h" @@ -285,6 +286,9 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) struct drm_modeset_acquire_ctx ctx; int ret; + if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) + i915gm_irq_cstate_wa(dev_priv, enable); + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(&dev_priv->drm); diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index ada1792df5b3..62401f6a04e4 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -302,7 +302,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(to_intel_framebuffer(fb), - vma->obj, &mode_cmd)) { + intel_bo_to_drm_bo(vma->obj), &mode_cmd)) { drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); goto err_vma; } diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index ceaf9e3147da..cdd314956a31 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -92,7 +92,7 @@ int intel_pmdemand_init(struct drm_i915_private *i915) &pmdemand_state->base, &intel_pmdemand_funcs); - if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) + if (IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) /* Wa_14016740474 */ intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE); @@ -258,6 +258,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state) static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *new_bw_state, *old_bw_state; const struct intel_cdclk_state 
*new_cdclk_state, *old_cdclk_state; const struct intel_crtc_state *new_crtc_state, *old_crtc_state; @@ -274,12 +275,16 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) new_dbuf_state = intel_atomic_get_new_dbuf_state(state); old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (new_dbuf_state && - (new_dbuf_state->active_pipes != - old_dbuf_state->active_pipes || - new_dbuf_state->enabled_slices != - old_dbuf_state->enabled_slices)) + new_dbuf_state->active_pipes != old_dbuf_state->active_pipes) return true; + if (DISPLAY_VER(display) < 30) { + if (new_dbuf_state && + new_dbuf_state->enabled_slices != + old_dbuf_state->enabled_slices) + return true; + } + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); old_cdclk_state = intel_atomic_get_old_cdclk_state(state); if (new_cdclk_state && @@ -327,10 +332,15 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) if (IS_ERR(new_dbuf_state)) return PTR_ERR(new_dbuf_state); - new_pmdemand_state->params.active_pipes = - min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); - new_pmdemand_state->params.active_dbufs = - min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + if (DISPLAY_VER(i915) < 30) { + new_pmdemand_state->params.active_dbufs = + min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + new_pmdemand_state->params.active_pipes = + min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); + } else { + new_pmdemand_state->params.active_pipes = + min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915)); + } new_cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(new_cdclk_state)) @@ -395,27 +405,32 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915, reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); - /* Set 1*/ pmdemand_state->params.qclk_gv_bw = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1); pmdemand_state->params.voltage_index = REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1); pmdemand_state->params.qclk_gv_index = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1); - pmdemand_state->params.active_pipes = - REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); - pmdemand_state->params.active_dbufs = - REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); pmdemand_state->params.active_phys = REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1); - /* Set 2*/ pmdemand_state->params.cdclk_freq_mhz = REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2); pmdemand_state->params.ddiclk_max = REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2); - pmdemand_state->params.scalers = - REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); + + if (DISPLAY_VER(i915) >= 30) { + pmdemand_state->params.active_pipes = + REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1); + } else { + pmdemand_state->params.active_pipes = + REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); + pmdemand_state->params.active_dbufs = + REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); + + pmdemand_state->params.scalers = + REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); + } unlock: mutex_unlock(&i915->display.pmdemand.lock); @@ -442,6 +457,10 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915, { u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3); + /* PM Demand only tracks active dbufs on pre-Xe3 platforms */ + if (DISPLAY_VER(i915) >= 30) + return; + mutex_lock(&i915->display.pmdemand.lock); if (drm_WARN_ON(&i915->drm, !intel_pmdemand_check_prev_transaction(i915))) @@ -460,7 +479,8 @@ unlock: } static void -intel_pmdemand_update_params(const struct 
intel_pmdemand_state *new, +intel_pmdemand_update_params(struct intel_display *display, + const struct intel_pmdemand_state *new, const struct intel_pmdemand_state *old, u32 *reg1, u32 *reg2, bool serialized) { @@ -495,16 +515,22 @@ intel_pmdemand_update_params(const struct intel_pmdemand_state *new, update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK); update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK); update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK); - update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); - update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK); /* Set 2*/ update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK); update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK); - update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK); + if (DISPLAY_VER(display) >= 30) { + update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK); + } else { + update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); + update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); + + update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); + } + #undef update_reg } @@ -514,6 +540,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915, const struct intel_pmdemand_state *old, bool serialized) { + struct intel_display *display = &i915->display; bool changed = false; u32 reg1, mod_reg1; u32 reg2, mod_reg2; @@ -529,7 +556,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915, reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); mod_reg2 = reg2; - intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2, + intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2, serialized); if (reg1 != mod_reg1) { diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h index 128fd61f8f14..a1c49efdc493 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.h +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h @@ -20,14 +20,14 @@ struct pmdemand_params { u8 voltage_index; u8 qclk_gv_index; u8 active_pipes; - u8 active_dbufs; + u8 active_dbufs; /* pre-Xe3 only */ /* Total number of non type C active phys from active_phys_mask */ u8 active_phys; u8 plls; u16 cdclk_freq_mhz; /* max from ddi_clocks[] */ u16 ddiclk_max; - u8 scalers; + u8 scalers; /* pre-Xe3 only */ }; struct intel_pmdemand_state { diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index feddc30e3375..093fe37a3983 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -3,6 +3,8 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/debugfs.h> + #include "g4x_dp.h" #include "i915_drv.h" #include "i915_reg.h" @@ -27,11 +29,10 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd static const char *pps_name(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_pps *pps = &intel_dp->pps; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - switch (pps->pps_pipe) { + if (display->platform.valleyview || display->platform.cherryview) { + switch (pps->vlv_pps_pipe) { case INVALID_PIPE: /* * FIXME would be nice if we can guarantee @@ -43,7 +44,7 @@ static const char *pps_name(struct intel_dp *intel_dp) case PIPE_B: return "PPS B"; default: - 
MISSING_CASE(pps->pps_pipe); + MISSING_CASE(pps->vlv_pps_pipe); break; } } else { @@ -68,7 +69,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) intel_wakeref_t wakeref; /* - * See intel_pps_reset_all() why we need a power domain reference here. + * See vlv_pps_reset_all() why we need a power domain reference here. */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&display->pps.mutex); @@ -85,7 +86,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, mutex_unlock(&display->pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); - return 0; + return NULL; } static void @@ -94,7 +95,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) struct intel_display *display = to_intel_display(intel_dp); struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum pipe pipe = intel_dp->pps.pps_pipe; + enum pipe pipe = intel_dp->pps.vlv_pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = vlv_pipe_to_phy(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); @@ -120,7 +121,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) DP |= DP_PORT_WIDTH(1); DP |= DP_LINK_TRAIN_PAT_1; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) DP |= DP_PIPE_SEL_CHV(pipe); else DP |= DP_PIPE_SEL(pipe); @@ -132,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) * So enable temporarily it if it's not already enabled. */ if (!pll_enabled) { - release_cl_override = IS_CHERRYVIEW(dev_priv) && + release_cl_override = display->platform.cherryview && !chv_phy_powergate_ch(dev_priv, phy, ch, true); if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) { @@ -180,18 +181,18 @@ static enum pipe vlv_find_free_pps(struct intel_display *display) if (encoder->type == INTEL_OUTPUT_EDP) { drm_WARN_ON(display->drm, - intel_dp->pps.active_pipe != INVALID_PIPE && - intel_dp->pps.active_pipe != - intel_dp->pps.pps_pipe); + intel_dp->pps.vlv_active_pipe != INVALID_PIPE && + intel_dp->pps.vlv_active_pipe != + intel_dp->pps.vlv_pps_pipe); - if (intel_dp->pps.pps_pipe != INVALID_PIPE) - pipes &= ~(1 << intel_dp->pps.pps_pipe); + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe); } else { drm_WARN_ON(display->drm, - intel_dp->pps.pps_pipe != INVALID_PIPE); + intel_dp->pps.vlv_pps_pipe != INVALID_PIPE); - if (intel_dp->pps.active_pipe != INVALID_PIPE) - pipes &= ~(1 << intel_dp->pps.active_pipe); + if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->pps.vlv_active_pipe); } } @@ -213,11 +214,11 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) /* We should never land here with regular DP ports */ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE && - intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE && + intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe); - if (intel_dp->pps.pps_pipe != INVALID_PIPE) - return intel_dp->pps.pps_pipe; + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE) + return intel_dp->pps.vlv_pps_pipe; pipe = vlv_find_free_pps(display); @@ -229,7 +230,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) pipe = PIPE_A; vlv_steal_power_sequencer(display, pipe); - intel_dp->pps.pps_pipe = pipe; + intel_dp->pps.vlv_pps_pipe = pipe; drm_dbg_kms(display->drm, "picked %s for 
[ENCODER:%d:%s]\n", @@ -246,7 +247,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) */ vlv_power_sequencer_kick(intel_dp); - return intel_dp->pps.pps_pipe; + return intel_dp->pps.vlv_pps_pipe; } static int @@ -260,10 +261,10 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) /* We should never land here with regular DP ports */ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp)); - if (!intel_dp->pps.pps_reset) + if (!intel_dp->pps.bxt_pps_reset) return pps_idx; - intel_dp->pps.pps_reset = false; + intel_dp->pps.bxt_pps_reset = false; /* * Only the HW needs to be reprogrammed, the SW state is fixed and @@ -325,19 +326,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_has_pp_on); + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_has_pp_on); /* didn't find one? pick one where vdd is on */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_has_vdd_on); + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_has_vdd_on); /* didn't find one? pick one with just the correct port */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) - intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port, - pps_any); + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) + intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port, + pps_any); /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ - if (intel_dp->pps.pps_pipe == INVALID_PIPE) { + if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) { drm_dbg_kms(display->drm, "[ENCODER:%d:%s] no initial power sequencer\n", dig_port->base.base.base.id, dig_port->base.base.name); @@ -354,10 +355,10 @@ static int intel_num_pps(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return 2; - if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + if (display->platform.geminilake || display->platform.broxton) return 2; if (INTEL_PCH_TYPE(i915) >= PCH_MTL) @@ -404,11 +405,10 @@ pps_initial_setup(struct intel_dp *intel_dp) struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); lockdep_assert_held(&display->pps.mutex); - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + if (display->platform.valleyview || display->platform.cherryview) { vlv_initial_power_sequencer_setup(intel_dp); return true; } @@ -446,21 +446,17 @@ pps_initial_setup(struct intel_dp *intel_dp) return intel_pps_is_valid(intel_dp); } -void intel_pps_reset_all(struct intel_display *display) +void vlv_pps_reset_all(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - if (drm_WARN_ON(display->drm, !IS_LP(dev_priv))) - return; - if (!HAS_DISPLAY(display)) return; /* * We can't grab pps_mutex here due to deadlock with power_domain * mutex when power_domain functions are called while holding pps_mutex. 
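
The lock ordering behind this comment is worth spelling out. A minimal sketch of the order that intel_pps_lock()/intel_pps_unlock() enforce, per the hunks above (illustrative only; the with_intel_pps_lock() convenience wrapper and error handling are omitted):

	intel_wakeref_t wakeref;

	/* 1) take the power domain reference first, never under pps_mutex */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	/* 2) only then take the PPS mutex */
	mutex_lock(&display->pps.mutex);

	/* ... vlv_pps_pipe and related state may be used here ... */

	/* 3) release in the reverse order */
	mutex_unlock(&display->pps.mutex);
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

Grabbing pps_mutex from within power-domain code would invert that order, which is the deadlock the comment describes.
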
- * That also means that in order to use pps_pipe the code needs to + * That also means that in order to use vlv_pps_pipe the code needs to * hold both a power domain reference and pps_mutex, and the power domain * reference get/put must be done while _not_ holding pps_mutex. * pps_{lock,unlock}() do these steps in the correct order, so one @@ -470,16 +466,27 @@ void intel_pps_reset_all(struct intel_display *display) for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN_ON(display->drm, - intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); - if (encoder->type != INTEL_OUTPUT_EDP) - continue; + if (encoder->type == INTEL_OUTPUT_EDP) + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; + } +} + +void bxt_pps_reset_all(struct intel_display *display) +{ + struct intel_encoder *encoder; + + if (!HAS_DISPLAY(display)) + return; + + /* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */ + + for_each_intel_dp(display->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (DISPLAY_VER(display) >= 9) - intel_dp->pps.pps_reset = true; - else - intel_dp->pps.pps_pipe = INVALID_PIPE; + if (encoder->type == INTEL_OUTPUT_EDP) + intel_dp->pps.bxt_pps_reset = true; } } @@ -500,9 +507,9 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, memset(regs, 0, sizeof(*regs)); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.valleyview || display->platform.cherryview) pps_idx = vlv_power_sequencer_pipe(intel_dp); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) pps_idx = bxt_power_sequencer_idx(intel_dp); else pps_idx = intel_dp->pps.pps_idx; @@ -513,7 +520,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, regs->pp_off = PP_OFF_DELAYS(display, pps_idx); /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || + if (display->platform.geminilake || display->platform.broxton || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) regs->pp_div = INVALID_MMIO_REG; else @@ -543,12 +550,11 @@ _pp_stat_reg(struct intel_dp *intel_dp) static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_dp->pps.pps_pipe == INVALID_PIPE) + if ((display->platform.valleyview || display->platform.cherryview) && + intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) return false; return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0; @@ -557,12 +563,11 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_dp->pps.pps_pipe == INVALID_PIPE) + if ((display->platform.valleyview || display->platform.cherryview) && + intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) return false; return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; @@ -792,7 +797,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) } /* - * Must be paired with intel_pps_off(). 
+ * Must be paired with intel_pps_vdd_off() or - to disable + * both VDD and panel power - intel_pps_off(). * Nested calls to these functions are not allowed since * we drop the lock. Caller must use some higher level * locking to prevent nested calls from other threads. @@ -800,7 +806,6 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) void intel_pps_vdd_on(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; bool vdd; @@ -810,10 +815,10 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp) vdd = false; with_intel_pps_lock(intel_dp, wakeref) vdd = intel_pps_vdd_on_unlocked(intel_dp); - I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name, - pps_name(intel_dp)); + INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name, + pps_name(intel_dp)); } static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) @@ -852,8 +857,10 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) intel_de_read(display, pp_stat_reg), intel_de_read(display, pp_ctrl_reg)); - if ((pp & PANEL_POWER_ON) == 0) + if ((pp & PANEL_POWER_ON) == 0) { intel_dp->pps.panel_power_off_time = ktime_get_boottime(); + intel_dp_invalidate_source_oui(intel_dp); + } intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), @@ -920,18 +927,17 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&display->pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; - I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd, - "[ENCODER:%d:%s] %s VDD not forced on", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name, - pps_name(intel_dp)); + INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd, + "[ENCODER:%d:%s] %s VDD not forced on", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name, + pps_name(intel_dp)); intel_dp->pps.want_panel_vdd = false; @@ -941,10 +947,20 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) edp_panel_vdd_schedule_off(intel_dp); } +void intel_pps_vdd_off(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp)) + return; + + with_intel_pps_lock(intel_dp, wakeref) + intel_pps_vdd_off_unlocked(intel_dp, false); +} + void intel_pps_on_unlocked(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pp; i915_reg_t pp_ctrl_reg; @@ -969,7 +985,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ilk_get_pp_control(intel_dp); - if (IS_IRONLAKE(dev_priv)) { + if (display->platform.ironlake) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; intel_de_write(display, pp_ctrl_reg, pp); @@ -985,7 +1001,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); pp |= PANEL_POWER_ON; - if (!IS_IRONLAKE(dev_priv)) + if (!display->platform.ironlake) pp |= 
PANEL_POWER_RESET; intel_de_write(display, pp_ctrl_reg, pp); @@ -998,7 +1014,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0); - if (IS_IRONLAKE(dev_priv)) { + if (display->platform.ironlake) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ intel_de_write(display, pp_ctrl_reg, pp); intel_de_posting_read(display, pp_ctrl_reg); @@ -1054,6 +1070,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp) wait_panel_off(intel_dp); intel_dp->pps.panel_power_off_time = ktime_get_boottime(); + intel_dp_invalidate_source_oui(intel_dp); + /* We got a reference when we enabled the VDD. */ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), @@ -1139,7 +1157,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) return; drm_dbg_kms(display->drm, "panel power control backlight %s\n", - enable ? "enable" : "disable"); + str_enable_disable(enable)); if (enable) intel_pps_backlight_on(intel_dp); @@ -1151,10 +1169,10 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum pipe pipe = intel_dp->pps.pps_pipe; + enum pipe pipe = intel_dp->pps.vlv_pps_pipe; i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B)) return; @@ -1177,7 +1195,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) intel_de_write(display, pp_on_reg, 0); intel_de_posting_read(display, pp_on_reg); - intel_dp->pps.pps_pipe = INVALID_PIPE; + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; } static void vlv_steal_power_sequencer(struct intel_display *display, @@ -1190,12 +1208,12 @@ static void vlv_steal_power_sequencer(struct intel_display *display, for_each_intel_dp(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe, + drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe, "stealing PPS %c from active [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); - if (intel_dp->pps.pps_pipe != pipe) + if (intel_dp->pps.vlv_pps_pipe != pipe) continue; drm_dbg_kms(display->drm, @@ -1208,8 +1226,59 @@ static void vlv_steal_power_sequencer(struct intel_display *display, } } -void vlv_pps_init(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe; + + if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, &pipe)) + return pipe; + + return INVALID_PIPE; +} + +/* Call on all DP, not just eDP */ +void vlv_pps_pipe_init(struct intel_dp *intel_dp) +{ + intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; + intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); +} + +/* Call on all DP, not just eDP */ +void vlv_pps_pipe_reset(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + with_intel_pps_lock(intel_dp, wakeref) + intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); +} + +enum pipe vlv_pps_backlight_initial_pipe(struct 
intel_dp *intel_dp) +{ + enum pipe pipe; + + /* + * Figure out the current pipe for the initial backlight setup. If the + * current pipe isn't valid, try the PPS pipe, and if that fails just + * assume pipe A. + */ + pipe = vlv_active_pipe(intel_dp); + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = intel_dp->pps.vlv_pps_pipe; + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = PIPE_A; + + return pipe; +} + +/* Call on all DP, not just eDP */ +void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1217,10 +1286,10 @@ void vlv_pps_init(struct intel_encoder *encoder, lockdep_assert_held(&display->pps.mutex); - drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE); + drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); - if (intel_dp->pps.pps_pipe != INVALID_PIPE && - intel_dp->pps.pps_pipe != crtc->pipe) { + if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE && + intel_dp->pps.vlv_pps_pipe != crtc->pipe) { /* * If another power sequencer was being used on this * port previously make sure to turn off vdd there while @@ -1235,13 +1304,13 @@ void vlv_pps_init(struct intel_encoder *encoder, */ vlv_steal_power_sequencer(display, crtc->pipe); - intel_dp->pps.active_pipe = crtc->pipe; + intel_dp->pps.vlv_active_pipe = crtc->pipe; if (!intel_dp_is_edp(intel_dp)) return; /* now it's all ours */ - intel_dp->pps.pps_pipe = crtc->pipe; + intel_dp->pps.vlv_pps_pipe = crtc->pipe; drm_dbg_kms(display->drm, "initializing %s for [ENCODER:%d:%s]\n", @@ -1253,6 +1322,18 @@ void vlv_pps_init(struct intel_encoder *encoder, pps_init_registers(intel_dp, true); } +/* Call on all DP, not just eDP */ +void vlv_pps_port_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + intel_wakeref_t wakeref; + + with_intel_pps_lock(intel_dp, wakeref) + intel_dp->pps.vlv_active_pipe = INVALID_PIPE; +} + static void pps_vdd_init(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); @@ -1555,7 +1636,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { port_sel = PANEL_PORT_SELECT_VLV(port); } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { switch (port) { @@ -1602,7 +1683,6 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd void intel_pps_encoder_reset(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) @@ -1613,7 +1693,7 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp) * Reinit the power sequencer also on the resume path, in case * BIOS did something nasty with it. 
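
The old single vlv_pps_init() entry point is split here into separate lifecycle hooks. A sketch of how a VLV/CHV DP encoder might use them; the wrapper function names below are hypothetical, and the real call sites (in g4x_dp.c) are outside this excerpt:

	/* at encoder init time, on all DP ports, not just eDP */
	static void vlv_dp_encoder_init(struct intel_dp *intel_dp)
	{
		vlv_pps_pipe_init(intel_dp);
	}

	/* in the pre-enable path, with the PPS lock held */
	static void vlv_dp_pre_enable(struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state)
	{
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		intel_wakeref_t wakeref;

		with_intel_pps_lock(intel_dp, wakeref)
			vlv_pps_port_enable_unlocked(encoder, crtc_state);
	}

	/* in the post-disable path; this helper takes the PPS lock itself */
	static void vlv_dp_post_disable(struct intel_encoder *encoder,
					const struct intel_crtc_state *crtc_state)
	{
		vlv_pps_port_disable(encoder, crtc_state);
	}
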
*/ - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) vlv_initial_power_sequencer_setup(intel_dp); pps_init_delays(intel_dp); @@ -1649,11 +1729,10 @@ bool intel_pps_init(struct intel_dp *intel_dp) static void pps_init_late(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return; if (intel_num_pps(display) < 2) @@ -1711,9 +1790,9 @@ void intel_pps_setup(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); - if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton) display->pps.mmio_base = PCH_PPS_BASE; - else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + else if (display->platform.valleyview || display->platform.cherryview) display->pps.mmio_base = VLV_PPS_BASE; else display->pps.mmio_base = PPS_BASE; @@ -1785,7 +1864,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) MISSING_CASE(port_sel); break; } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + } else if (display->platform.valleyview || display->platform.cherryview) { /* presumably write lock depends on pipe, not port select */ pp_reg = PP_CONTROL(display, pipe); panel_pipe = pipe; @@ -1806,7 +1885,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; - I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked, - "panel assertion failure, pipe %c regs locked\n", - pipe_name(pipe)); + INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked, + "panel assertion failure, pipe %c regs locked\n", + pipe_name(pipe)); } diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index 0c5da83a559e..c83007152f07 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -34,6 +34,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp); void intel_pps_check_power_unlocked(struct intel_dp *intel_dp); void intel_pps_vdd_on(struct intel_dp *intel_dp); +void intel_pps_vdd_off(struct intel_dp *intel_dp); void intel_pps_on(struct intel_dp *intel_dp); void intel_pps_off(struct intel_dp *intel_dp); void intel_pps_vdd_off_sync(struct intel_dp *intel_dp); @@ -43,10 +44,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp); bool intel_pps_init(struct intel_dp *intel_dp); void intel_pps_init_late(struct intel_dp *intel_dp); void intel_pps_encoder_reset(struct intel_dp *intel_dp); -void intel_pps_reset_all(struct intel_display *display); -void vlv_pps_init(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void vlv_pps_pipe_init(struct intel_dp *intel_dp); +void vlv_pps_pipe_reset(struct intel_dp *intel_dp); +enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp); +void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void vlv_pps_port_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void vlv_pps_reset_all(struct intel_display *display); +void 
bxt_pps_reset_all(struct intel_display *display); void intel_pps_unlock_regs_wa(struct intel_display *display); void intel_pps_setup(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 136a0d6ca970..a784c0b81556 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -21,6 +21,8 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/debugfs.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> @@ -33,6 +35,7 @@ #include "intel_cursor_regs.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -230,7 +233,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp) switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: if (display->params.enable_psr == -1) - return connector->panel.vbt.psr.enable; + return intel_dp_is_edp(intel_dp) ? + connector->panel.vbt.psr.enable : + true; return display->params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; @@ -762,7 +767,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - u8 val = DP_PSR_ENABLE; + u8 val = 0; if (crtc_state->has_sel_update) { val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; @@ -782,7 +787,9 @@ static void _psr_enable_sink(struct intel_dp *intel_dp, if (intel_dp->psr.entry_setup_frames > 0) val |= DP_PSR_FRAME_CAPTURE; + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); + val |= DP_PSR_ENABLE; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val); } @@ -1446,11 +1453,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (DISPLAY_VER(display) >= 12) { + if (DISPLAY_VER(display) >= 20) { + psr_max_h = crtc_hdisplay; + psr_max_v = crtc_vdisplay; + max_bpp = crtc_state->pipe_bpp; + } else if (IS_DISPLAY_VER(display, 12, 14)) { psr_max_h = 5120; psr_max_v = 3200; max_bpp = 30; - } else if (DISPLAY_VER(display) >= 10) { + } else if (IS_DISPLAY_VER(display, 10, 11)) { psr_max_h = 4096; psr_max_v = 2304; max_bpp = 24; @@ -1599,6 +1610,10 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, /* Remaining checks are for eDP only */ + if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A && + to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B) + return false; + /* 128b/132b Panel Replay is not supported on eDP */ if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(display->drm, @@ -1903,14 +1918,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * cause issues if non-supported panels are used. 
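
One detail of the _psr_enable_sink() hunk above deserves emphasis: DP_PSR_EN_CFG is now written twice, so the sink latches the full configuration before the enable bit flips. A condensed sketch of the resulting sequence (most configuration bits elided; see the hunk for the complete set):

	u8 val = 0;

	/* first write: program the configuration with PSR still disabled */
	if (crtc_state->has_sel_update)
		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	if (intel_dp->psr.entry_setup_frames > 0)
		val |= DP_PSR_FRAME_CAPTURE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);

	/* second write: identical configuration, now with the enable bit */
	val |= DP_PSR_ENABLE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);

Previously val started out as DP_PSR_ENABLE and everything went to the sink in a single write.
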
*/ if (!intel_dp->psr.panel_replay_enabled && - (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || + (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv))) intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) intel_de_rmw(display, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 0, @@ -1998,6 +2013,15 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_dp->psr.enabled = true; intel_dp->psr.paused = false; + /* + * Link_ok is sticky and set here on PSR enable. We can assume link + * training is complete as we never continue to PSR enable with + * untrained link. Link_ok is kept as set until first short pulse + * interrupt. This is targeted to workaround panels stating bad link + * after PSR is enabled. + */ + intel_dp->psr.link_ok = true; + intel_psr_activate(intel_dp); } @@ -2095,7 +2119,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) if (intel_dp->psr.sel_update_enabled) { /* Wa_16012604467:adlp,mtl[a0,b0] */ if (!intel_dp->psr.panel_replay_enabled && - IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0)) + IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0)) intel_de_rmw(display, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); @@ -2114,7 +2138,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); intel_de_rmw(display, - PORT_ALPM_CTL(display, cpu_transcoder), + PORT_ALPM_CTL(cpu_transcoder), PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); } @@ -2157,6 +2181,8 @@ void intel_psr_disable(struct intel_dp *intel_dp, intel_psr_disable_locked(intel_dp); + intel_dp->psr.link_ok = false; + mutex_unlock(&intel_dp->psr.lock); cancel_work_sync(&intel_dp->psr.work); cancel_delayed_work_sync(&intel_dp->psr.dc3co_work); @@ -2221,6 +2247,36 @@ unlock: mutex_unlock(&psr->lock); } +/** + * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed + * @crtc_state: CRTC status + * + * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't + * prevent it in case of Panel Replay. Panel Replay switches main link off on + * DC entry. This means vblank interrupts are not fired and is a problem if + * user-space is polling for vblank events. 
+ */ +bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) { + struct intel_dp *intel_dp; + + if (!intel_encoder_is_dp(encoder)) + continue; + + intel_dp = enc_to_intel_dp(encoder); + + if (intel_dp_is_edp(intel_dp) && + CAN_PANEL_REPLAY(intel_dp)) + return true; + } + + return false; +} + static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) { struct drm_i915_private *dev_priv = to_i915(display->drm); @@ -2480,11 +2536,60 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c return true; } +/* Wa 14019834836 */ +static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_encoder *encoder; + int hactive_limit; + + if (crtc_state->psr2_su_area.y1 != 0 || + crtc_state->psr2_su_area.y2 != 0) + return; + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546; + else + hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273; + + if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit) + return; + + for_each_intel_encoder_mask_with_psr(display->drm, encoder, + crtc_state->uapi.encoder_mask) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!intel_dp_is_edp(intel_dp) && + intel_dp->psr.panel_replay_enabled && + intel_dp->psr.sel_update_enabled) { + crtc_state->psr2_su_area.y2++; + return; + } + } +} + +static void +intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + /* Wa_14014971492 */ + if (!crtc_state->has_panel_replay && + ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || + IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) && + crtc_state->splitter.enable) + crtc_state->psr2_su_area.y1 = 0; + + /* Wa 14019834836 */ + if (DISPLAY_VER(display) == 30) + intel_psr_apply_pr_link_on_su_wa(crtc_state); +} + int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *new_plane_state, *old_plane_state; struct intel_plane *plane; @@ -2589,12 +2694,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (full_update) goto skip_sel_fetch_set_loop; - /* Wa_14014971492 */ - if (!crtc_state->has_panel_replay && - ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) || - IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) && - crtc_state->splitter.enable) - crtc_state->psr2_su_area.y1 = 0; + intel_psr_apply_su_area_workarounds(crtc_state); ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) @@ -3373,6 +3473,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) mutex_lock(&psr->lock); + psr->link_ok = false; + if (!psr->enabled) goto exit; @@ -3433,6 +3535,33 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) } /** + * intel_psr_link_ok - return psr->link_ok + * @intel_dp: struct intel_dp + * + * We are seeing unexpected link re-trainings with some panels. 
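
As the kerneldoc below goes on to describe, link-status checks should prefer this PSR flag over what the panel reports. A hypothetical caller-side sketch (the real consumers live in the link-training/short-pulse paths, outside this excerpt; the helper name is made up):

	static bool intel_dp_should_retrain(struct intel_dp *intel_dp,
					    const u8 link_status[DP_LINK_STATUS_SIZE])
	{
		/* The panel reports a healthy link: nothing to do. */
		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
			return false;

		/*
		 * The panel reports a bad link, but if PSR still vouches for
		 * it (link_ok set), trust PSR and skip the retrain.
		 */
		return !intel_psr_link_ok(intel_dp);
	}
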
This is caused + * by panel stating bad link status after PSR is enabled. Code checking link + * status can call this to ensure it can ignore bad link status stated by the + * panel I.e. if panel is stating bad link and intel_psr_link_ok is stating link + * is ok caller should rely on latter. + * + * Return value of link_ok + */ +bool intel_psr_link_ok(struct intel_dp *intel_dp) +{ + bool ret; + + if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) || + !intel_dp_is_edp(intel_dp)) + return false; + + mutex_lock(&intel_dp->psr.lock); + ret = intel_dp->psr.link_ok; + mutex_unlock(&intel_dp->psr.lock); + + return ret; +} + +/** * intel_psr_lock - grab PSR lock * @crtc_state: the crtc state * @@ -3848,10 +3977,8 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector) struct drm_i915_private *i915 = to_i915(connector->base.dev); struct dentry *root = connector->base.debugfs_entry; - /* TODO: Add support for MST connectors as well. */ - if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && - connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) || - connector->mst_port) + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && + connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) return; debugfs_create_file("i915_psr_sink_status", 0444, root, diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 6eb5f15f674f..956be263c09e 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -58,6 +58,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); +bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state); +bool intel_psr_link_ok(struct intel_dp *intel_dp); void intel_psr_lock(const struct intel_crtc_state *crtc_state); void intel_psr_unlock(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 642bb15fb547..9ad7611506e8 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -9,6 +9,7 @@ #include "intel_display_reg_defs.h" #include "intel_dp_aux_regs.h" +#define _TRANS_EXITLINE_A 0x60018 #define TRANS_EXITLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) #define EXITLINE_MASK REG_GENMASK(12, 0) @@ -295,9 +296,9 @@ #define _PORT_ALPM_CTL_A 0x16fa2c #define _PORT_ALPM_CTL_B 0x16fc2c -#define PORT_ALPM_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B) +#define PORT_ALPM_CTL(port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B) #define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31) -#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20) +#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(25, 20) #define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val) #define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16) #define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val) @@ -306,7 +307,7 @@ #define _PORT_ALPM_LFPS_CTL_A 0x16fa30 #define _PORT_ALPM_LFPS_CTL_B 0x16fc30 -#define PORT_ALPM_LFPS_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A, 
_PORT_ALPM_LFPS_CTL_B) +#define PORT_ALPM_LFPS_CTL(port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A, _PORT_ALPM_LFPS_CTL_B) #define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31) #define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24) #define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7 diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 29b56d53a340..28f497ae785b 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -231,7 +231,7 @@ static struct intel_quirk intel_quirks[] = { { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, }; -static struct intel_dpcd_quirk intel_dpcd_quirks[] = { +static const struct intel_dpcd_quirk intel_dpcd_quirks[] = { /* Dell Precision 5490 */ { .device = 0x7d55, @@ -272,7 +272,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp, int i; for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) { - struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i]; + const struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i]; if (d->device == q->device && (d->subsystem_vendor == q->subsystem_vendor || diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 7cc519b402e9..7a28104f68ad 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -36,6 +36,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_eld.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -2081,10 +2082,10 @@ intel_sdvo_get_edid(struct drm_connector *connector) static const struct drm_edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); struct i2c_adapter *ddc; - ddc = intel_gmbus_get_adapter(i915, i915->display.vbt.crt_ddc_pin); + ddc = intel_gmbus_get_adapter(display, display->vbt.crt_ddc_pin); if (!ddc) return NULL; @@ -2637,6 +2638,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo *sdvo, static void intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) { + struct intel_display *display = to_intel_display(&sdvo->base); struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev); const struct sdvo_device_mapping *mapping; u8 pin; @@ -2647,7 +2649,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized && - intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) + intel_gmbus_is_valid_pin(display, mapping->i2c_pin)) pin = mapping->i2c_pin; else pin = GMBUS_PIN_DPB; @@ -2656,7 +2658,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo) sdvo->base.base.base.id, sdvo->base.base.name, pin, sdvo->target_addr); - sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); + sdvo->i2c = intel_gmbus_get_adapter(display, pin); /* * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index e6df1f92def5..4b3a32736fd6 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1997,6 +1997,7 @@ int intel_snps_phy_check_hdmi_link_rate(int clock) void intel_mpllb_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); const struct 
intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -2019,11 +2020,11 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state, intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); #define MPLLB_CHECK(__name) \ - I915_STATE_WARN(i915, mpllb_sw_state->__name != mpllb_hw_state.__name, \ - "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ - crtc->base.base.id, crtc->base.name, \ - __stringify(__name), \ - mpllb_sw_state->__name, mpllb_hw_state.__name) + INTEL_DISPLAY_STATE_WARN(display, mpllb_sw_state->__name != mpllb_hw_state.__name, \ + "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ + crtc->base.base.id, crtc->base.name, \ + __stringify(__name), \ + mpllb_sw_state->__name, mpllb_hw_state.__name) MPLLB_CHECK(mpllb_cp); MPLLB_CHECK(mpllb_div); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e657b09ede99..e6fadcef58e0 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -378,7 +378,8 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_sprite_update_noarm(struct intel_plane *plane, +vlv_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -399,7 +400,8 @@ vlv_sprite_update_noarm(struct intel_plane *plane, } static void -vlv_sprite_update_arm(struct intel_plane *plane, +vlv_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -449,7 +451,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, } static void -vlv_sprite_disable_arm(struct intel_plane *plane, +vlv_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -795,7 +798,8 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_sprite_update_noarm(struct intel_plane *plane, +ivb_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -826,7 +830,8 @@ ivb_sprite_update_noarm(struct intel_plane *plane, } static void -ivb_sprite_update_arm(struct intel_plane *plane, +ivb_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -874,7 +879,8 @@ ivb_sprite_update_arm(struct intel_plane *plane, } static void -ivb_sprite_disable_arm(struct intel_plane *plane, +ivb_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -1133,7 +1139,8 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_sprite_update_noarm(struct intel_plane *plane, +g4x_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1162,7 +1169,8 @@ g4x_sprite_update_noarm(struct intel_plane *plane, } static void -g4x_sprite_update_arm(struct intel_plane *plane, +g4x_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const 
struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1206,7 +1214,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, } static void -g4x_sprite_disable_arm(struct intel_plane *plane, +g4x_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 044a032e41b9..531079979c05 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -8,9 +8,6 @@ #include <linux/types.h> -struct drm_device; -struct drm_display_mode; -struct drm_file; struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; @@ -19,8 +16,6 @@ enum pipe; #ifdef I915 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); -int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c index 4853c4806004..1d0b84b464c1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c +++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c @@ -42,6 +42,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state, int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct intel_display *display = to_intel_display(dev); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_intel_sprite_colorkey *set = data; struct drm_plane *plane; @@ -100,7 +101,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, */ if (!ret && has_dst_key_in_primary_plane(dev_priv)) { struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, + intel_crtc_for_pipe(display, to_intel_plane(plane)->pipe); plane_state = drm_atomic_get_plane_state(state, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 6f2ee7dbc43b..b16c4d2d4077 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -1005,7 +1005,7 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { drm_dbg_kms(&i915->drm, "Port %s: timeout waiting for TCSS power to get %s\n", - enabled ? 
"enabled" : "disabled", + str_enabled_disabled(enabled), tc->port_name); return false; } diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 5fee4be64592..27c530218ee6 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -33,6 +33,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -1092,7 +1093,6 @@ intel_tv_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode mode = {}; @@ -1166,7 +1166,7 @@ intel_tv_get_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock /= 2; /* pixel counter doesn't work on i965gm TV output */ - if (IS_I965GM(dev_priv)) + if (display->platform.i965gm) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; } @@ -1196,7 +1196,6 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct intel_atomic_state *state = to_intel_atomic_state(pipe_config->uapi.state); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); @@ -1348,7 +1347,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, adjusted_mode->name[0] = '\0'; /* pixel counter doesn't work on i965gm TV output */ - if (IS_I965GM(dev_priv)) + if (display->platform.i965gm) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; @@ -1524,7 +1523,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; /* Enable two fixes for the chips that need them. */ - if (IS_I915GM(dev_priv)) + if (display->platform.i915gm) tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; set_tv_mode_timings(display, tv_mode, burst_ena); @@ -1626,7 +1625,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, * The TV sense state should be cleared to zero on cantiga platform. Otherwise * the TV is misdetected. This is hardware requirement. */ - if (IS_GM45(dev_priv)) + if (display->platform.gm45) tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 0b7f2134e441..a95fb3349eba 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -3,6 +3,8 @@ * Copyright © 2022-2023 Intel Corporation */ +#include <drm/drm_vblank.h> + #include "i915_drv.h" #include "i915_reg.h" #include "intel_color.h" @@ -193,7 +195,6 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* * The scanline counter increments at the leading edge of hsync. 
@@ -223,7 +224,7 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) */ if (DISPLAY_VER(display) == 2) return -1; - else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return 2; else return 1; @@ -325,14 +326,13 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, const struct drm_display_mode *mode) { struct intel_display *display = to_intel_display(_crtc->dev); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc *crtc = to_intel_crtc(_crtc); enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; bool use_scanline_counter = DISPLAY_VER(display) >= 5 || - IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 || + display->platform.g4x || DISPLAY_VER(display) == 2 || crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; if (drm_WARN_ON(display->drm, !mode->crtc_clock)) { @@ -601,14 +601,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state, struct intel_vblank_evade_ctx *evade) { + struct intel_display *display = to_intel_display(new_crtc_state); struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state; const struct drm_display_mode *adjusted_mode; evade->crtc = crtc; - evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) && + evade->need_vlv_dsi_wa = (display->platform.valleyview || + display->platform.cherryview) && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 2e849b015e74..40525f5c4c42 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -306,6 +306,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; + if (vdsc_cfg->bits_per_component < 8) { + drm_dbg_kms(&dev_priv->drm, "DSC bpc requirements not met bpc: %d\n", + vdsc_cfg->bits_per_component); + return -EINVAL; + } + drm_dsc_set_rc_buf_thresh(vdsc_cfg); /* @@ -379,9 +385,9 @@ static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state) { int num_vdsc_instances = intel_dsc_get_vdsc_per_pipe(crtc_state); + int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state); - if (crtc_state->joiner_pipes) - num_vdsc_instances *= 2; + num_vdsc_instances *= num_joined_pipes; return num_vdsc_instances; } @@ -742,7 +748,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) u32 dss_ctl1_val = 0; if (crtc_state->joiner_pipes && !crtc_state->dsc.compression_enable) { - if (intel_crtc_is_joiner_secondary(crtc_state)) + if (intel_crtc_is_bigjoiner_secondary(crtc_state)) dss_ctl1_val |= UNCOMPRESSED_JOINER_SECONDARY; else dss_ctl1_val |= UNCOMPRESSED_JOINER_PRIMARY; @@ -770,8 +776,15 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state) dss_ctl1_val |= JOINER_ENABLE; } if (crtc_state->joiner_pipes) { + if (intel_crtc_ultrajoiner_enable_needed(crtc_state)) + dss_ctl1_val |= ULTRA_JOINER_ENABLE; + + if (intel_crtc_is_ultrajoiner_primary(crtc_state)) + dss_ctl1_val |= PRIMARY_ULTRA_JOINER_ENABLE; + dss_ctl1_val |= BIG_JOINER_ENABLE; - if 
(!intel_crtc_is_joiner_secondary(crtc_state)) + + if (intel_crtc_is_bigjoiner_primary(crtc_state)) dss_ctl1_val |= PRIMARY_BIG_JOINER_ENABLE; } intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h index f921ad67b587..bf32a3b46fb1 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -37,6 +37,8 @@ #define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) #define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) #define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) +#define ULTRA_JOINER_ENABLE REG_BIT(23) +#define PRIMARY_ULTRA_JOINER_ENABLE REG_BIT(22) #define UNCOMPRESSED_JOINER_PRIMARY (1 << 21) #define UNCOMPRESSED_JOINER_SECONDARY (1 << 20) diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index 0b5916c15307..fd18dd07ae49 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -14,24 +14,24 @@ #include "intel_de.h" #include "intel_vga.h" -static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) +static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display) { - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + if (display->platform.valleyview || display->platform.cherryview) return VLV_VGACNTRL; - else if (DISPLAY_VER(i915) >= 5) + else if (DISPLAY_VER(display) >= 5) return CPU_VGACNTRL; else return VGACNTRL; } /* Disable the VGA plane that we never use */ -void intel_vga_disable(struct drm_i915_private *dev_priv) +void intel_vga_disable(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); + i915_reg_t vga_reg = intel_vga_cntrl_reg(display); u8 sr1; - if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE) + if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE) return; /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ @@ -42,23 +42,24 @@ void intel_vga_disable(struct drm_i915_private *dev_priv) vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); - intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE); - intel_de_posting_read(dev_priv, vga_reg); + intel_de_write(display, vga_reg, VGA_DISP_DISABLE); + intel_de_posting_read(display, vga_reg); } -void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv) +void intel_vga_redisable_power_on(struct intel_display *display) { - i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + i915_reg_t vga_reg = intel_vga_cntrl_reg(display); - if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) { - drm_dbg_kms(&dev_priv->drm, + if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) { + drm_dbg_kms(display->drm, "Something enabled VGA plane, disabling it\n"); - intel_vga_disable(dev_priv); + intel_vga_disable(display); } } -void intel_vga_redisable(struct drm_i915_private *i915) +void intel_vga_redisable(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref; /* @@ -74,14 +75,14 @@ void intel_vga_redisable(struct drm_i915_private *i915) if (!wakeref) return; - intel_vga_redisable_power_on(i915); + intel_vga_redisable_power_on(display); intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref); } -void intel_vga_reset_io_mem(struct drm_i915_private 
*i915) +void intel_vga_reset_io_mem(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -98,10 +99,10 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915) vga_put(pdev, VGA_RSRC_LEGACY_IO); } -int intel_vga_register(struct drm_i915_private *i915) +int intel_vga_register(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); int ret; /* @@ -119,9 +120,9 @@ int intel_vga_register(struct drm_i915_private *i915) return 0; } -void intel_vga_unregister(struct drm_i915_private *i915) +void intel_vga_unregister(struct intel_display *display) { - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *pdev = to_pci_dev(display->drm->dev); vga_client_unregister(pdev); } diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h index ba5b55b917f0..824dfc32a199 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.h +++ b/drivers/gpu/drm/i915/display/intel_vga.h @@ -6,13 +6,13 @@ #ifndef __INTEL_VGA_H__ #define __INTEL_VGA_H__ -struct drm_i915_private; +struct intel_display; -void intel_vga_reset_io_mem(struct drm_i915_private *i915); -void intel_vga_disable(struct drm_i915_private *i915); -void intel_vga_redisable(struct drm_i915_private *i915); -void intel_vga_redisable_power_on(struct drm_i915_private *i915); -int intel_vga_register(struct drm_i915_private *i915); -void intel_vga_unregister(struct drm_i915_private *i915); +void intel_vga_reset_io_mem(struct intel_display *display); +void intel_vga_disable(struct intel_display *display); +void intel_vga_redisable(struct intel_display *display); +void intel_vga_redisable_power_on(struct intel_display *display); +int intel_vga_register(struct intel_display *display); +void intel_vga_unregister(struct intel_display *display); #endif /* __INTEL_VGA_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 9a51f5bac307..19a5d0076bb8 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -56,6 +56,11 @@ bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh) vrefresh <= info->monitor_range.max_vfreq; } +bool intel_vrr_possible(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->vrr.flipline; +} + void intel_vrr_check_modeset(struct intel_atomic_state *state) { @@ -239,11 +244,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, (crtc_state->hw.adjusted_mode.crtc_vtotal - crtc_state->hw.adjusted_mode.vsync_end); } +} + +void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + if (!intel_vrr_possible(crtc_state)) + return; - /* - * For XE_LPD+, we use guardband and pipeline override - * is deprecated. 
- */ if (DISPLAY_VER(display) >= 13) { crtc_state->vrr.guardband = crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; @@ -281,7 +291,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0, PIPE_VBLANK_WITH_DELAY); - if (!crtc_state->vrr.flipline) { + if (!intel_vrr_possible(crtc_state)) { intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0); return; diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 89937858200d..b3b45c675020 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -15,9 +15,11 @@ struct intel_crtc_state; bool intel_vrr_is_capable(struct intel_connector *connector); bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh); +bool intel_vrr_possible(const struct intel_crtc_state *crtc_state); void intel_vrr_check_modeset(struct intel_atomic_state *state); void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state); +void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state); void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state); void intel_vrr_enable(const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c index 82c4933ad507..d7dc49aecd27 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.c +++ b/drivers/gpu/drm/i915/display/intel_wm.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/debugfs.h> + #include "i915_drv.h" #include "i9xx_wm.h" #include "intel_display_types.h" @@ -48,29 +50,15 @@ void intel_update_watermarks(struct drm_i915_private *i915) i915->display.funcs.wm->update_wm(i915); } -int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) +int intel_wm_compute(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - - if (i915->display.funcs.wm->compute_pipe_wm) - return i915->display.funcs.wm->compute_pipe_wm(state, crtc); - - return 0; -} - -int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - if (!i915->display.funcs.wm->compute_intermediate_wm) - return 0; + struct intel_display *display = to_intel_display(state); - if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) + if (!display->funcs.wm->compute_watermarks) return 0; - return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); + return display->funcs.wm->compute_watermarks(state, crtc); } bool intel_initial_watermarks(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h index 48429ac140d2..e97cdca89a5c 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.h +++ b/drivers/gpu/drm/i915/display/intel_wm.h @@ -15,10 +15,8 @@ struct intel_crtc_state; struct intel_plane_state; void intel_update_watermarks(struct drm_i915_private *i915); -int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc); -int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc); +int intel_wm_compute(struct intel_atomic_state *state, + struct intel_crtc *crtc); bool 
intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_atomic_update_watermarks(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index baa601d27815..7dbc99b02eaa 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -272,7 +272,6 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; bool force_detach = !fb || !plane_state->uapi.visible; bool need_scaler = false; @@ -281,72 +280,16 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) need_scaler = true; - ret = skl_update_scaler(crtc_state, force_detach, - drm_plane_index(&intel_plane->base), - &plane_state->scaler_id, - drm_rect_width(&plane_state->uapi.src) >> 16, - drm_rect_height(&plane_state->uapi.src) >> 16, - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst), - fb ? fb->format : NULL, - fb ? fb->modifier : 0, - need_scaler); - - if (ret || plane_state->scaler_id < 0) - return ret; - - /* check colorkey */ - if (plane_state->ckey.flags) { - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] scaling with color key not allowed", - intel_plane->base.base.id, - intel_plane->base.name); - return -EINVAL; - } - - /* Check src format */ - switch (fb->format->format) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_XYUV8888: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - break; - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - if (DISPLAY_VER(dev_priv) >= 11) - break; - fallthrough; - default: - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, intel_plane->base.name, - fb->base.id, fb->format->format); - return -EINVAL; - } - - return 0; + return skl_update_scaler(crtc_state, force_detach, + drm_plane_index(&intel_plane->base), + &plane_state->scaler_id, + drm_rect_width(&plane_state->uapi.src) >> 16, + drm_rect_height(&plane_state->uapi.src) >> 16, + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst), + fb ? fb->format : NULL, + fb ? 
fb->modifier : 0, + need_scaler); } static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index c8720d31d101..038ca2ec5d7a 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" @@ -349,7 +350,6 @@ static int skl_plane_max_width(const struct drm_framebuffer *fb, return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: /* FIXME AUX plane? */ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -431,6 +431,16 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb, } } +static int xe3_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 4096; + else + return 6144; +} + static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) @@ -593,11 +603,11 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane, * in full-range YCbCr. */ static void -icl_program_input_csc(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, +icl_program_input_csc(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -641,31 +651,31 @@ icl_program_input_csc(struct intel_plane *plane, }; const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), - ROFF(csc[0]) | GOFF(csc[1])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), - BOFF(csc[2])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), - ROFF(csc[3]) | GOFF(csc[4])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), - BOFF(csc[5])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), - ROFF(csc[6]) | GOFF(csc[7])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), - BOFF(csc[8])); - - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), - PREOFF_YUV_TO_RGB_HI); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), - PREOFF_YUV_TO_RGB_LO); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), + BOFF(csc[2])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), + ROFF(csc[3]) | GOFF(csc[4])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), + BOFF(csc[5])); + intel_de_write_dsb(display, dsb, 
PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), + ROFF(csc[6]) | GOFF(csc[7])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), + BOFF(csc[8])); + + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); } static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, @@ -719,9 +729,11 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level) return val; } -static void skl_write_plane_wm(struct intel_plane *plane, +static void skl_write_plane_wm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -733,71 +745,75 @@ static void skl_write_plane_wm(struct intel_plane *plane, int level; for (level = 0; level < i915->display.wm.num_levels; level++) - intel_de_write_fw(i915, PLANE_WM(pipe, plane_id, level), - skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); + intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level), + skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); - intel_de_write_fw(i915, PLANE_WM_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); + intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - intel_de_write_fw(i915, PLANE_WM_SAGV(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.wm0)); - intel_de_write_fw(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.trans_wm)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.wm0)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.trans_wm)); } - intel_de_write_fw(i915, PLANE_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb)); + intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb)); if (DISPLAY_VER(i915) < 11) - intel_de_write_fw(i915, PLANE_NV12_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb_y)); + intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb_y)); } static void -skl_plane_disable_arm(struct intel_plane *plane, +skl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, 
plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } -static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); } static void -icl_plane_disable_arm(struct intel_plane *plane, +icl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - icl_plane_disable_sel_fetch_arm(plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } static bool @@ -1234,28 +1250,30 @@ static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) return keymsk; } -static void icl_plane_csc_load_black(struct intel_plane *plane) +static void icl_plane_csc_load_black(struct intel_dsb *dsb, + struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); - 
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static int icl_plane_color_plane(const struct intel_plane_state *plane_state) @@ -1268,11 +1286,12 @@ static int icl_plane_color_plane(const struct intel_plane_state *plane_state) } static void -skl_plane_update_noarm(struct intel_plane *plane, +skl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 stride = skl_plane_stride(plane_state, 0); @@ -1287,21 +1306,23 @@ skl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); } static void -skl_plane_update_arm(struct intel_plane *plane, +skl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1321,22 +1342,26 @@ skl_plane_update_arm(struct intel_plane *plane, plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, 0)); - intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | - PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); 
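The hunks above and below convert plane programming from immediate intel_de_write_fw() MMIO writes to intel_de_write_dsb(), which additionally takes a DSB (Display State Buffer) handle. The apparent shape of such a helper — queue the write into a hardware-executed batch when a DSB is supplied, fall back to an immediate MMIO write when it is NULL — can be modelled in a few lines. Below is a minimal standalone sketch with stand-in types and a stubbed MMIO accessor; it is not the kernel's actual implementation:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct intel_dsb: a queue of (register, value) pairs. */
    struct dsb {
        uint32_t cmds[64][2];
        size_t len;
    };

    /* Stand-in for a real MMIO accessor. */
    static void mmio_write(uint32_t reg, uint32_t val)
    {
        printf("MMIO  0x%05x <- 0x%08x\n", (unsigned int)reg, (unsigned int)val);
    }

    /* Queue into the batch when a DSB exists, else write immediately. */
    static void write_reg(struct dsb *dsb, uint32_t reg, uint32_t val)
    {
        if (dsb && dsb->len < 64) {
            dsb->cmds[dsb->len][0] = reg;
            dsb->cmds[dsb->len][1] = val;
            dsb->len++;
        } else {
            mmio_write(reg, val);
        }
    }

    int main(void)
    {
        struct dsb batch = { .len = 0 };

        write_reg(NULL, 0x70180, 0x1);   /* immediate path */
        write_reg(&batch, 0x70180, 0x1); /* batched path */
        printf("queued %zu write(s)\n", batch.len);
        return 0;
    }

Accepting a NULL batch in a single helper keeps every caller on one code path, which is presumably why the conversion can be applied mechanically across all the plane routines in this file.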
+ intel_de_write_dsb(display, dsb, PLANE_AUX_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | + PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); /* * Enable the scaler before the plane so that we don't @@ -1353,17 +1378,19 @@ skl_plane_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, 0)); } -static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_rect *clip; u32 val; @@ -1380,7 +1407,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, y = (clip->y1 + plane_state->uapi.dst.y1); val = y << 16; val |= plane_state->uapi.dst.x1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_POS(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_POS(pipe, plane->id), val); x = plane_state->view.color_plane[color_plane].x; @@ -1395,20 +1422,21 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, val = y << 16 | x; - intel_de_write_fw(i915, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), - val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), val); /* Sizes are 0 based */ val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); } static void -icl_plane_update_noarm(struct intel_plane *plane, +icl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1432,76 +1460,82 @@ icl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, 
plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), - lower_32_bits(plane_state->ccval)); - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), - upper_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); } /* FLAT CCS doesn't need to program AUX_DIST */ if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20) - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, color_plane)); if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), - plane_state->cus_ctl); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), + plane_state->cus_ctl); - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) - icl_program_input_csc(plane, crtc_state, plane_state); + icl_program_input_csc(dsb, plane, plane_state); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); /* * FIXME: pxp session invalidation can hit any time even at time of commit * or after the commit, display content will be garbage. 
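A little earlier in this hunk, the 64-bit clear-color value plane_state->ccval is split across the two 32-bit PLANE_CC_VAL registers with lower_32_bits()/upper_32_bits(). The arithmetic, shown standalone with an illustrative value — the macro bodies below follow the linux/kernel.h definitions, where the double 16-bit shift avoids an undefined 32-bit shift when the argument is only 32 bits wide:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
    #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

    int main(void)
    {
        uint64_t ccval = 0x3fff2aaa1555000fULL; /* example only, not from the patch */

        printf("PLANE_CC_VAL(..., 0) <- 0x%08" PRIx32 "\n", lower_32_bits(ccval)); /* 0x1555000f */
        printf("PLANE_CC_VAL(..., 1) <- 0x%08" PRIx32 "\n", upper_32_bits(ccval)); /* 0x3fff2aaa */
        return 0;
    }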
*/ if (plane_state->force_black) - icl_plane_csc_load_black(plane); + icl_plane_csc_load_black(dsb, plane, crtc_state); - icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); + icl_plane_update_sel_fetch_noarm(dsb, plane, crtc_state, plane_state, color_plane); } -static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), - SEL_FETCH_PLANE_CTL_ENABLE); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), + SEL_FETCH_PLANE_CTL_ENABLE); else - icl_plane_disable_sel_fetch_arm(plane, crtc_state); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); } static void -icl_plane_update_arm(struct intel_plane *plane, +icl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; int color_plane = icl_plane_color_plane(plane_state); @@ -1520,37 +1554,45 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); - icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state); + icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
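The constraint this comment describes — the plane's double-buffered registers are merely staged, and the single write to PLANE_SURF is what latches (arms) the whole set — is why PLANE_CTL is written immediately before it. A toy sketch of that contract, with hypothetical helper names:

    #include <stdint.h>
    #include <stdio.h>

    static void reg_write(const char *name, uint32_t val)
    {
        printf("%-10s <- 0x%08x\n", name, (unsigned int)val);
    }

    static void plane_update_arm(uint32_t ctl, uint32_t surf)
    {
        /* position/stride/etc. were staged in the earlier noarm phase */
        reg_write("PLANE_CTL", ctl);   /* staged like the rest... */
        reg_write("PLANE_SURF", surf); /* ...this one write arms them all */
    }

    int main(void)
    {
        plane_update_arm(0x80000000u /* illustrative enable bit */, 0x00100000u);
        return 0;
    }

The comment's concern is the previously-disabled case, where PLANE_CTL takes effect immediately rather than waiting for the PLANE_SURF write; keeping the two writes adjacent minimizes the window in which the plane could be enabled with stale state.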
*/ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, color_plane)); } static void -skl_plane_async_flip(struct intel_plane *plane, +skl_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 plane_ctl = plane_state->ctl; + u32 plane_ctl = plane_state->ctl, plane_surf; plane_ctl |= skl_plane_ctl_crtc(crtc_state); + plane_surf = skl_plane_surf(plane_state, 0); - if (async_flip) - plane_ctl |= PLANE_CTL_ASYNC_FLIP; + if (async_flip) { + if (DISPLAY_VER(display) >= 30) + plane_surf |= PLANE_SURF_ASYNC_UPDATE; + else + plane_ctl |= PLANE_CTL_ASYNC_FLIP; + } - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + plane_surf); } static bool intel_format_is_p01x(u32 format) @@ -2095,13 +2137,13 @@ static void check_protection(struct intel_plane_state *plane_state) struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); if (DISPLAY_VER(i915) < 11) return; plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0; - plane_state->force_black = i915_gem_object_is_protected(obj) && + plane_state->force_black = intel_bo_is_protected(obj) && !plane_state->decrypt; } @@ -2313,8 +2355,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) +static bool icl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); @@ -2326,9 +2368,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; + case DRM_FORMAT_RGB565: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2338,20 +2385,69 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - if (intel_fb_is_mc_ccs_modifier(modifier)) + case DRM_FORMAT_XVYU2101010: + if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; fallthrough; - case DRM_FORMAT_RGB565: + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case 
DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + return true; + fallthrough; + default: + return false; + } +} + +static bool tgl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + struct intel_plane *plane = to_intel_plane(_plane); + + if (!intel_fb_plane_supports_modifier(plane, modifier)) + return false; + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: + if (intel_fb_is_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + if (intel_fb_is_mc_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_C8: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: @@ -2374,13 +2470,22 @@ static const struct drm_plane_funcs skl_plane_funcs = { .format_mod_supported = skl_plane_format_mod_supported, }; -static const struct drm_plane_funcs gen12_plane_funcs = { +static const struct drm_plane_funcs icl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = gen12_plane_format_mod_supported, + .format_mod_supported = icl_plane_format_mod_supported, +}; + +static const struct drm_plane_funcs tgl_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = tgl_plane_format_mod_supported, }; static void @@ -2422,8 +2527,8 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, (plane_id == PLANE_1 || plane_id == PLANE_2); } -static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, - enum plane_id plane_id) +static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915, + enum plane_id plane_id) { if (DISPLAY_VER(i915) < 12) return false; @@ -2461,7 +2566,7 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915, caps |= INTEL_PLANE_CAP_CCS_RC_CC; } - if (gen12_plane_has_mc_ccs(i915, plane_id)) + if (tgl_plane_has_mc_ccs(i915, plane_id)) caps |= INTEL_PLANE_CAP_CCS_MC; if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915)) @@ -2494,7 +2599,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(dev_priv) >= 30) { + plane->max_width = xe3_plane_max_width; + plane->max_height = icl_plane_max_height; + plane->min_cdclk = icl_plane_min_cdclk; + } else if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; if (icl_is_hdr_plane(dev_priv, plane_id)) 
plane->max_width = icl_hdr_plane_max_width; @@ -2552,7 +2661,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane_id, &num_formats); if (DISPLAY_VER(dev_priv) >= 12) - plane_funcs = &gen12_plane_funcs; + plane_funcs = &tgl_plane_funcs; + else if (DISPLAY_VER(dev_priv) == 11) + plane_funcs = &icl_plane_funcs; else plane_funcs = &skl_plane_funcs; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h index 4ddcd7d46bbd..ff31a00d511e 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h +++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h @@ -159,6 +159,7 @@ _PLANE_SURF_2_A, _PLANE_SURF_2_B) #define PLANE_SURF_ADDR_MASK REG_GENMASK(31, 12) #define PLANE_SURF_DECRYPT REG_BIT(2) +#define PLANE_SURF_ASYNC_UPDATE REG_BIT(0) #define _PLANE_KEYMAX_1_A 0x701a0 #define _PLANE_KEYMAX_2_A 0x702a0 diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 045c7cac166b..3b0e87edbacf 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -3,6 +3,8 @@ * Copyright © 2022 Intel Corporation */ +#include <linux/debugfs.h> + #include <drm/drm_blend.h> #include "i915_drv.h" @@ -716,7 +718,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane); + int color_plane, unsigned int pan_x); static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, struct intel_plane *plane, @@ -763,7 +765,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, drm_format_info(DRM_FORMAT_ARGB8888), DRM_FORMAT_MOD_LINEAR, DRM_MODE_ROTATE_0, - crtc_state->pixel_rate, &wp, 0); + crtc_state->pixel_rate, &wp, 0, 0); drm_WARN_ON(&i915->drm, ret); for (level = 0; level < i915->display.wm.num_levels; level++) { @@ -1740,7 +1742,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane) + int color_plane, unsigned int pan_x) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -1801,7 +1803,9 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_min_scanlines, wp->dbuf_block_size); - if (DISPLAY_VER(i915) >= 10) + if (DISPLAY_VER(i915) >= 30) + interm_pbpl += (pan_x != 0); + else if (DISPLAY_VER(i915) >= 10) interm_pbpl++; wp->plane_blocks_per_line = div_fixed16(interm_pbpl, @@ -1843,7 +1847,8 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, fb->format, fb->modifier, plane_state->hw.rotation, intel_plane_pixel_rate(crtc_state, plane_state), - wp, color_plane); + wp, color_plane, + plane_state->uapi.src.x1); } static bool skl_wm_has_lines(struct drm_i915_private *i915, int level) @@ -1907,7 +1912,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } } - blocks = fixed16_to_u32_round_up(selected_result) + 1; + blocks = fixed16_to_u32_round_up(selected_result); + if (DISPLAY_VER(i915) < 30) + blocks++; + /* * Let's have blocks at minimum equivalent to plane_blocks_per_line * as there will be at minimum one line for lines configuration. 
This @@ -2971,6 +2979,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, static void skl_wm_get_hw_state(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc *crtc; @@ -2978,7 +2987,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) if (HAS_MBUS_JOINING(i915)) dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; - dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw); + dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = @@ -3524,7 +3533,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state) for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) { u32 pipe_val = val; - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { + if (DISPLAY_VERx100(i915) == 1400) { if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, new_dbuf_state->active_pipes)) pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL; @@ -3598,6 +3607,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, const struct intel_dbuf_state *dbuf_state) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe = ffs(dbuf_state->active_pipes) - 1; const struct intel_crtc_state *new_crtc_state; @@ -3606,7 +3616,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus); drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes)); - crtc = intel_crtc_for_pipe(i915, pipe); + crtc = intel_crtc_for_pipe(display, pipe); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state)) @@ -3668,7 +3678,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state) void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); const struct intel_dbuf_state *old_dbuf_state = @@ -3687,7 +3697,7 @@ void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state) intel_dbuf_mbus_join_update(state, pipe); if (pipe != INVALID_PIPE) { - struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); intel_crtc_wait_for_next_vblank(crtc); } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 78b121941237..e73baec94873 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -73,9 +73,9 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state); container_of_const((global_state), struct intel_dbuf_state, base) #define intel_atomic_get_old_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) + to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) #define intel_atomic_get_new_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, 
&to_i915(state->base.dev)->display.dbuf.obj)) + to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) int intel_dbuf_init(struct drm_i915_private *i915); int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index d21f3fb39706..9383eedee2d4 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -30,6 +30,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" @@ -43,6 +44,7 @@ #include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" +#include "intel_pfit.h" #include "skl_scaler.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" @@ -1071,7 +1073,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port)); hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port)); - /* harizontal values are in terms of high speed byte clock */ + /* horizontal values are in terms of high speed byte clock */ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 70c5a13a3c75..59a50647f2c3 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -592,15 +592,16 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) static void assert_dsi_pll(struct drm_i915_private *i915, bool state) { + struct intel_display *display = &i915->display; bool cur_state; vlv_cck_get(i915); cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN; vlv_cck_put(i915); - I915_STATE_WARN(i915, cur_state != state, - "DSI PLL state assertion failure (expected %s, current %s)\n", - str_on_off(state), str_on_off(cur_state)); + INTEL_DISPLAY_STATE_WARN(display, cur_state != state, + "DSI PLL state assertion failure (expected %s, current %s)\n", + str_on_off(state), str_on_off(cur_state)); } void assert_dsi_pll_enabled(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index a3b83cfe1726..f151640c1d13 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -915,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) */ if (i915_gem_context_uses_protected_content(eb->gem_context) && i915_gem_object_is_protected(obj)) { - err = intel_pxp_key_check(eb->i915->pxp, obj, true); + err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true); if (err) { i915_gem_object_put(obj); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 3b27218aabe2..900c08337942 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -13,7 +13,7 @@ #include "i915_driver.h" #include "i915_drv.h" -#if defined(CONFIG_X86) +#if IS_ENABLED(CONFIG_X86) #include <asm/smp.h> #else #define wbinvd_on_all_cpus() \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index d166052eb2ce..9117e9422844 100644 --- 
a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -117,7 +117,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, }, { NULL, 0 }, }, *phase; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; unsigned long count = 0; unsigned long scanned = 0; int err = 0, i = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index d29005980806..9d958a6f377e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -457,7 +457,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915) icl_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 8) { - if (IS_LP(i915)) + if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915)) chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index b22e2019768f..10d8673641f7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -808,7 +808,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, } if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { - ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; @@ -1038,7 +1038,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; vm_fault_t ret; int idx; @@ -1195,7 +1195,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; assert_object_held_shared(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 03b00a03a634..041dab543b78 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -624,7 +624,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, /* Populate ttm with pages if needed. Typically system memory. 
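The repeated conversion in these gem/ hunks from ttm_tt_populate(bo->bdev, bo->ttm, &ctx) to ttm_bo_populate(bo, &ctx) points at a wrapper that derives the device and TT from the buffer object itself, so a caller can no longer pair a bdev and a ttm taken from two different objects. A sketch of that shape with abbreviated stand-in types (not TTM's real definitions, which carry extra swap/restore bookkeeping):

    #include <stdio.h>

    struct ttm_device { const char *name; };
    struct ttm_tt { int populated; };
    struct ttm_buffer_object {
        struct ttm_device *bdev;
        struct ttm_tt *ttm;
    };
    struct ttm_operation_ctx { int interruptible; };

    static int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *tt,
                               struct ttm_operation_ctx *ctx)
    {
        (void)bdev;
        (void)ctx;
        tt->populated = 1; /* pretend backing pages were allocated */
        return 0;
    }

    /* Convenience wrapper: take the BO, look up its own bdev/ttm pair. */
    static int ttm_bo_populate(struct ttm_buffer_object *bo,
                               struct ttm_operation_ctx *ctx)
    {
        return ttm_tt_populate(bo->bdev, bo->ttm, ctx);
    }

    int main(void)
    {
        struct ttm_device dev = { "ttm0" };
        struct ttm_tt tt = { 0 };
        struct ttm_buffer_object bo = { &dev, &tt };
        struct ttm_operation_ctx ctx = { 1 };

        printf("ret=%d populated=%d\n", ttm_bo_populate(&bo, &ctx), tt.populated);
        return 0;
    }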
*/ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { - ret = ttm_tt_populate(bo->bdev, ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index ad649523d5e0..61596cecce4d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -90,7 +90,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, goto out_no_lock; backup_bo = i915_gem_to_ttm(backup); - err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + err = ttm_bo_populate(backup_bo, &ctx); if (err) goto out_no_populate; @@ -189,7 +189,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, if (!backup_bo->resource) err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx); if (!err) - err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + err = ttm_bo_populate(backup_bo, &ctx); if (!err) { err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 8fe0499308ff..4904d0f4162c 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -169,7 +169,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, return cs; } -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs) +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs) { return __gen2_emit_breadcrumb(rq, cs, 16, 8); } @@ -248,7 +248,7 @@ int i830_emit_bb_start(struct i915_request *rq, return 0; } -int gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { @@ -292,29 +292,12 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask &= ~engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); - ENGINE_POSTING_READ16(engine, RING_IMR); -} - -void gen2_irq_disable(struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = engine->i915; - - i915->irq_mask |= engine->irq_enable_mask; - intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); -} - -void gen3_irq_enable(struct intel_engine_cs *engine) -{ engine->i915->irq_mask &= ~engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); } -void gen3_irq_disable(struct intel_engine_cs *engine) +void gen2_irq_disable(struct intel_engine_cs *engine) { engine->i915->irq_mask |= engine->irq_enable_mask; intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h index a5cd64a65c9e..7b37560fc356 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h @@ -15,13 +15,13 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode); int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode); int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode); -u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs); +u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs); u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs); int i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); -int 
gen3_emit_bb_start(struct i915_request *rq, +int gen2_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags); int gen4_emit_bb_start(struct i915_request *rq, @@ -30,8 +30,6 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine); void gen2_irq_disable(struct intel_engine_cs *engine); -void gen3_irq_enable(struct intel_engine_cs *engine); -void gen3_irq_disable(struct intel_engine_cs *engine); void gen5_irq_enable(struct intel_engine_cs *engine); void gen5_irq_disable(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index d38b914d1206..6e89112f68ae 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -399,7 +399,8 @@ static void emit_batch(struct i915_vma * const vma, batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); batch_add(&cmds, 0xffff0000 | - ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ? + (((IS_IVYBRIDGE(i915) && INTEL_INFO(i915)->gt == 1) || + IS_VALLEYVIEW(i915)) ? HIZ_RAW_STALL_OPT_DISABLE : 0)); batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1)); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 20b9b04ec1e0..cc866773ba6f 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -70,7 +70,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) if (!--b->irq_enabled) b->irq_disable(b); - WRITE_ONCE(b->irq_armed, 0); + WRITE_ONCE(b->irq_armed, NULL); intel_gt_pm_put_async(b->irq_engine->gt, wakeref); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index a8eac59e3779..1c4784cb296c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -15,6 +15,7 @@ #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC +#define HEAD_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_START(base) _MMIO((base) + 0x38) #define RING_CTL(base) _MMIO((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ @@ -26,7 +27,6 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ #define RING_SYNC_0(base) _MMIO((base) + 0x40) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index a6c69a706fd7..c4a351ebf395 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -185,7 +185,7 @@ int intel_gt_init_hw(struct intel_gt *gt) if (IS_HASWELL(i915)) intel_uncore_write(uncore, HSW_MI_PREDICATE_RESULT_2, - IS_HASWELL_GT3(i915) ? + INTEL_INFO(i915)->gt == 3 ? LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); /* Apply the GT workarounds... 
*/ @@ -302,7 +302,7 @@ static void gen6_check_faults(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - u32 fault; + unsigned long fault; for_each_engine(engine, gt, id) { fault = GEN6_RING_FAULT_REG_READ(engine); @@ -310,8 +310,8 @@ static void gen6_check_faults(struct intel_gt *gt) gt_dbg(gt, "Unexpected fault\n" "\tAddr: 0x%08lx\n" "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", + "\tSource ID: %ld\n" + "\tType: %ld\n", fault & PAGE_MASK, fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index ad4c51f18d3a..1240d44eeb85 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -452,10 +452,10 @@ void gen8_gt_irq_reset(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - GEN8_IRQ_RESET_NDX(uncore, GT, 0); - GEN8_IRQ_RESET_NDX(uncore, GT, 1); - GEN8_IRQ_RESET_NDX(uncore, GT, 2); - GEN8_IRQ_RESET_NDX(uncore, GT, 3); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(0)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(1)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(2)); + gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(3)); } void gen8_gt_irq_postinstall(struct intel_gt *gt) @@ -476,14 +476,14 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt) gt->pm_ier = 0x0; gt->pm_imr = ~gt->pm_ier; - GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); - GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(0), ~gt_interrupts[0], gt_interrupts[0]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]); /* * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. Same will be the case for GuC interrupts.
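The GEN8_IRQ_RESET_NDX()/GEN3_IRQ_RESET() macro variants are being folded into plain gen2_irq_reset()/gen2_irq_init() calls that take the whole IMR/IER/IIR bank as one value. A minimal sketch of the helper type this implies; the struct layout and initializer are inferred from the call sites in this series, not quoted from the tree:

struct i915_irq_regs {
	i915_reg_t imr;
	i915_reg_t ier;
	i915_reg_t iir;
};

/* Assumed shape of the initializer behind GEN8_GT_IRQ_REGS() and friends. */
#define I915_IRQ_REGS(_imr, _ier, _iir) \
	((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) })

Passing the triplet by value lets a single reset/init routine serve every register bank, instead of one token-pasting macro per register prefix.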
*/ - GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); - GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier); + gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]); } static void gen5_gt_update_irq(struct intel_gt *gt, @@ -514,9 +514,9 @@ void gen5_gt_irq_reset(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - GEN3_IRQ_RESET(uncore, GT); + gen2_irq_reset(uncore, GT_IRQ_REGS); if (GRAPHICS_VER(gt->i915) >= 6) - GEN3_IRQ_RESET(uncore, GEN6_PM); + gen2_irq_reset(uncore, GEN6_PM_IRQ_REGS); } void gen5_gt_irq_postinstall(struct intel_gt *gt) @@ -538,7 +538,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) else gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; - GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs); + gen2_irq_init(uncore, GT_IRQ_REGS, gt->gt_imr, gt_irqs); if (GRAPHICS_VER(gt->i915) >= 6) { /* @@ -551,6 +551,6 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) } gt->pm_imr = 0xffffffff; - GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs); + gen2_irq_init(uncore, GEN6_PM_IRQ_REGS, gt->pm_imr, pm_irqs); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 911fd0160221..6f25c747bc29 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -35,7 +35,7 @@ static inline void __intel_gt_pm_get(struct intel_gt *gt) static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt) { if (!intel_wakeref_get_if_active(>->wakeref)) - return 0; + return NULL; return intel_wakeref_track(>->wakeref); } @@ -73,7 +73,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha } #define with_intel_gt_pm(gt, wf) \ - for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0) + for ((wf) = intel_gt_pm_get(gt); (wf); intel_gt_pm_put((gt), (wf)), (wf) = NULL) /** * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent @@ -84,7 +84,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha * @wf: pointer to a temporary wakeref. 
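The sentinel changes in this header (0 becomes NULL, the mock-GT marker becomes ERR_PTR(-ENODEV)) imply that intel_wakeref_t is now a pointer rather than an integer handle. A sketch under that assumption; the typedef target and the helper below are illustrative, not taken from the patch:

#include <linux/err.h>

/* Assumption: the wakeref handle is now pointer-typed. */
typedef struct ref_tracker *intel_wakeref_t;

#define INTEL_WAKEREF_MOCK_GT ERR_PTR(-ENODEV)	/* as added by this series */

/* "No wakeref held" becomes a NULL test instead of a comparison with 0. */
static inline bool example_wakeref_held(intel_wakeref_t wf)
{
	return wf != NULL;	/* note: ERR_PTR() sentinels are deliberately non-NULL */
}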
*/ #define with_intel_gt_pm_if_awake(gt, wf) \ - for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0) + for ((wf) = intel_gt_pm_get_if_awake(gt); (wf); intel_gt_pm_put_async((gt), (wf)), (wf) = NULL) static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { @@ -105,9 +105,13 @@ int intel_gt_runtime_resume(struct intel_gt *gt); ktime_t intel_gt_get_awake_time(const struct intel_gt *gt); +#define INTEL_WAKEREF_MOCK_GT ERR_PTR(-ENODEV) + static inline bool is_mock_gt(const struct intel_gt *gt) { - return I915_SELFTEST_ONLY(gt->awake == -ENODEV); + BUILD_BUG_ON(INTEL_WAKEREF_DEF == INTEL_WAKEREF_MOCK_GT); + + return I915_SELFTEST_ONLY(gt->awake == INTEL_WAKEREF_MOCK_GT); } #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 8d08b38874ef..b635aa2820d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -431,7 +431,7 @@ static int llc_show(struct seq_file *m, void *data) max_gpu_freq /= GEN9_FREQ_SCALER; } - seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n"); wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 57a3c83d3655..6dba65e54cdb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -432,6 +432,7 @@ #define XEHPG_INSTDONE_GEOM_SVG MCR_REG(0x666c) #define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ +#define DISABLE_REPACKING_FOR_COMPRESSION REG_BIT(15) /* jsl+ */ #define RC_OP_FLUSH_ENABLE (1 << 0) #define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) #define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ @@ -1472,6 +1473,10 @@ GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) +#define GEN6_PM_IRQ_REGS I915_IRQ_REGS(GEN6_PMIMR, \ + GEN6_PMIER, \ + GEN6_PMIIR) + #define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4) #define GEN7_GT_SCRATCH_REG_NUM 8 diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7bd5d2c29056..51847a846002 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -820,8 +820,10 @@ static bool ctx_needs_runalone(const struct intel_context *ce) bool ctx_is_protected = false; /* - * On MTL and newer platforms, protected contexts require setting - * the LRC run-alone bit or else the encryption will not happen. + * Wa_14019159160 - Case 2. + * On some platforms, protected contexts require setting + * the LRC run-alone bit or else the encryption/decryption will not happen. + * NOTE: Case 2 only applies to PXP use-case of said workaround. 
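The GEN12_CTX_CTRL_RUNALONE_MODE write just below goes through _MASKED_BIT_ENABLE() because CTX_CONTEXT_CONTROL is a masked register: the upper 16 bits select which of the lower 16 bits the write actually changes. Conceptually (simplified; the real i915 helpers are built on _MASKED_FIELD(mask, value)):

#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* select and set */
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* select and clear */

Because unselected bits are left untouched by the hardware, independent fields can be updated without a read-modify-write cycle.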
*/ if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) && (ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) { @@ -850,6 +852,7 @@ static void init_common_regs(u32 * const regs, if (GRAPHICS_VER(engine->i915) < 11) ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); + /* Wa_14019159160 - Case 2.*/ if (ctx_needs_runalone(ce)) ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE); regs[CTX_CONTEXT_CONTROL] = ctl; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8f1ea95471ef..f42f21632306 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1233,7 +1233,7 @@ void intel_gt_reset(struct intel_gt *gt, } if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - intel_runtime_pm_disable_interrupts(gt->i915); + intel_irq_suspend(gt->i915); if (do_reset(gt, stalled_mask)) { gt_err(gt, "Failed to reset chip\n"); @@ -1241,7 +1241,7 @@ void intel_gt_reset(struct intel_gt *gt, } if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - intel_runtime_pm_enable_interrupts(gt->i915); + intel_irq_resume(gt->i915); intel_overlay_reset(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 72277bc8322e..32f3b52a183a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -192,6 +192,7 @@ static bool stop_ring(struct intel_engine_cs *engine) static int xcs_resume(struct intel_engine_cs *engine) { struct intel_ring *ring = engine->legacy.ring; + ktime_t kt; ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", ring->head, ring->tail); @@ -230,9 +231,27 @@ static int xcs_resume(struct intel_engine_cs *engine) set_pp_dir(engine); /* First wake the ring up to an empty/idle ring */ - ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC); + ktime_before(ktime_get(), (kt)); cpu_relax()) { + /* + * If a reset fails because the engine resumed from an + * incorrect RING_HEAD, the GPU may be fed invalid + * instructions, which may lead to an unrecoverable + * hang. So if the first write doesn't succeed, try again.
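The retry loop added here bounds the RING_HEAD write by a 2 ms ktime budget, re-issuing the write until it reads back correctly. Distilled into a hypothetical standalone helper for clarity (the function name is illustrative; the macros and ktime calls are the ones the hunk itself uses):

static bool example_write_head_and_verify(struct intel_engine_cs *engine, u32 head)
{
	const ktime_t end = ktime_add_ns(ktime_get(), 2 * NSEC_PER_MSEC);

	do {
		/* Write, then read back: stop as soon as the value sticks. */
		ENGINE_WRITE_FW(engine, RING_HEAD, head);
		if (ENGINE_READ_FW(engine, RING_HEAD) == head)
			return true;
		cpu_relax();
	} while (ktime_before(ktime_get(), end));

	return false;	/* the caller treats a stuck head as a failed resume */
}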
+ */ + ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); + if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head) + break; + } + ENGINE_WRITE_FW(engine, RING_TAIL, ring->head); - ENGINE_POSTING_READ(engine, RING_TAIL); + if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) { + ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n", + ENGINE_READ_FW(engine, RING_HEAD), + ENGINE_READ_FW(engine, RING_TAIL), + ring->head); + goto err; + } ENGINE_WRITE_FW(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); @@ -241,12 +260,16 @@ static int xcs_resume(struct intel_engine_cs *engine) if (__intel_wait_for_register_fw(engine->uncore, RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, - 5000, 0, NULL)) + 5000, 0, NULL)) { + ENGINE_TRACE(engine, "failed to restart\n"); goto err; + } - if (GRAPHICS_VER(engine->i915) > 2) + if (GRAPHICS_VER(engine->i915) > 2) { ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_POSTING_READ(engine, RING_MI_MODE); + } /* Now awake, let it get started */ if (ring->tail != ring->head) { @@ -1090,9 +1113,6 @@ static void setup_irq(struct intel_engine_cs *engine) } else if (GRAPHICS_VER(i915) >= 5) { engine->irq_enable = gen5_irq_enable; engine->irq_disable = gen5_irq_disable; - } else if (GRAPHICS_VER(i915) >= 3) { - engine->irq_enable = gen3_irq_enable; - engine->irq_disable = gen3_irq_disable; } else { engine->irq_enable = gen2_irq_enable; engine->irq_disable = gen2_irq_disable; @@ -1146,7 +1166,7 @@ static void setup_common(struct intel_engine_cs *engine) * equivalent to our next initial breadcrumb so we can elide * engine->emit_init_breadcrumb(). */ - engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; + engine->emit_fini_breadcrumb = gen2_emit_breadcrumb; if (GRAPHICS_VER(i915) == 5) engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; @@ -1159,7 +1179,7 @@ static void setup_common(struct intel_engine_cs *engine) else if (IS_I830(i915) || IS_I845G(i915)) engine->emit_bb_start = i830_emit_bb_start; else - engine->emit_bb_start = gen3_emit_bb_start; + engine->emit_bb_start = gen2_emit_bb_start; } static void setup_rcs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c index 756e9ebbc725..2487768bc230 100644 --- a/drivers/gpu/drm/i915/gt/intel_tlb.c +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c @@ -122,7 +122,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno) { intel_wakeref_t wakeref; - if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + if (is_mock_gt(gt)) return; if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e539a656cfc3..570c91878189 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -418,7 +418,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (INTEL_INFO(i915)->gt == 3 ?
HDC_FENCE_DEST_SLM_DISABLE : 0)); } static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -2299,6 +2299,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } + if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { + /* + * "Disable Repacking for Compression (masked R/W access) + * before rendering compressed surfaces for display." + */ + wa_masked_en(wal, CACHE_MODE_0_GEN7, + DISABLE_REPACKING_FOR_COMPRESSION); + } + if (GRAPHICS_VER(i915) == 11) { /* This is not a Wa. Enable for better image quality */ wa_masked_en(wal, @@ -2537,7 +2546,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_FF_DS_SCHED_HW); /* WaDisablePSDDualDispatchEnable:ivb */ - if (IS_IVB_GT1(i915)) + if (INTEL_INFO(i915)->gt == 1) wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1, GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index 1fb6ff77fd89..bb696b29ee2c 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -40,7 +40,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) if (i915_gem_object_is_shmem(obj)) { file = obj->base.filp; - atomic_long_inc(&file->f_count); + get_file(file); return file; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 097fc6bd1285..5949ff0b0161 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc) static u32 guc_ctl_feature_flags(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); u32 flags = 0; + /* + * Enable PXP GuC autoteardown flow. + * NB: MTL does things differently. + */ + if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915)) + flags |= GUC_CTL_ENABLE_GUC_PXP_CTL; + if (!intel_guc_submission_is_used(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 23f54c84cbab..fe53e8eccf4b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -145,7 +145,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool * an end user should hit the timeout is in case of extreme thermal throttling. * And a system that is that hot during boot is probably dead anyway!
*/ -#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) #define GUC_LOAD_RETRY_LIMIT 20 #else #define GUC_LOAD_RETRY_LIMIT 3 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 263c9c3f6a03..4ce6e2332a63 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -105,6 +105,7 @@ #define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22) #define GUC_CTL_FEATURE 2 +#define GUC_CTL_ENABLE_GUC_PXP_CTL BIT(1) #define GUC_CTL_ENABLE_SLPC BIT(2) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index bf16351c9349..222c95f62156 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -14,7 +14,7 @@ #include "intel_guc_log.h" #include "intel_guc_print.h" -#if defined(CONFIG_DRM_I915_DEBUG_GUC) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index ed979847187f..9ede6f240d79 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1339,7 +1339,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake. */ - wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt); + wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt); if (wakeref) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 2d9152eb7282..d7ac31c3254c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -455,7 +455,7 @@ static const char *auth_mode_string(struct intel_huc *huc, * an end user should hit the timeout is in case of extreme thermal throttling. * And a system that is that hot during boot is probably dead anyway! 
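This and the neighbouring hunks (GuC firmware, GuC log, HuC) switch #if defined(CONFIG_...) to #if IS_ENABLED(CONFIG_...). For the bool options involved the two forms are equivalent; IS_ENABLED() is simply the preferred idiom because it stays correct if an option ever becomes tristate, where =m defines CONFIG_FOO_MODULE rather than CONFIG_FOO. Illustration with a hypothetical option:

#if IS_ENABLED(CONFIG_EXAMPLE_DEBUG)	/* true for =y and =m alike */
#define EXAMPLE_LOAD_RETRY_LIMIT 20
#else
#define EXAMPLE_LOAD_RETRY_LIMIT 3
#endif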
*/ -#if defined(CONFIG_DRM_I915_DEBUG_GEM) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) #define HUC_LOAD_RETRY_LIMIT 20 #else #define HUC_LOAD_RETRY_LIMIT 3 diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2f4c9c66b40b..81d67a46cd9e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -50,7 +50,6 @@ #include "trace.h" #include "display/i9xx_plane_regs.h" -#include "display/intel_display.h" #include "display/intel_sprite_regs.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index c66d6d3177c8..17f74cb244bb 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -32,6 +32,8 @@ * */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -568,7 +570,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + port->dpcd->data[DP_SINK_COUNT] = 0x1; port->type = type; port->id = resolution; port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC; diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index f5616f99ef2f..8090bc53c7e1 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -59,52 +59,10 @@ struct intel_vgpu; #define INTEL_GVT_MAX_UEVENT_VARS 3 -/* DPCD start */ -#define DPCD_SIZE 0x700 - -/* DPCD */ -#define DP_SET_POWER 0x600 -#define DP_SET_POWER_D0 0x1 -#define AUX_NATIVE_WRITE 0x8 -#define AUX_NATIVE_READ 0x9 - -#define AUX_NATIVE_REPLY_MASK (0x3 << 4) -#define AUX_NATIVE_REPLY_ACK (0x0 << 4) #define AUX_NATIVE_REPLY_NAK (0x1 << 4) -#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) #define AUX_BURST_SIZE 20 -/* DPCD addresses */ -#define DPCD_REV 0x000 -#define DPCD_MAX_LINK_RATE 0x001 -#define DPCD_MAX_LANE_COUNT 0x002 - -#define DPCD_TRAINING_PATTERN_SET 0x102 -#define DPCD_SINK_COUNT 0x200 -#define DPCD_LANE0_1_STATUS 0x202 -#define DPCD_LANE2_3_STATUS 0x203 -#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 -#define DPCD_SINK_STATUS 0x205 - -/* link training */ -#define DPCD_TRAINING_PATTERN_SET_MASK 0x03 -#define DPCD_LINK_TRAINING_DISABLED 0x00 -#define DPCD_TRAINING_PATTERN_1 0x01 -#define DPCD_TRAINING_PATTERN_2 0x02 - -#define DPCD_CP_READY_MASK (1 << 6) - -/* lane status */ -#define DPCD_LANES_CR_DONE 0x11 -#define DPCD_LANES_EQ_DONE 0x22 -#define DPCD_SYMBOL_LOCKED 0x44 - -#define DPCD_INTERLANE_ALIGN_DONE 0x01 - -#define DPCD_SINK_IN_SYNC 0x03 -/* DPCD end */ - #define SBI_RESPONSE_MASK 0x3 #define SBI_RESPONSE_SHIFT 0x1 #define SBI_STAT_MASK 0x1 diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index c022dc736045..0a357ca42db1 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -32,6 +32,8 @@ * */ +#include <drm/display/drm_dp.h> + #include "display/intel_dp_aux_regs.h" #include "display/intel_gmbus_regs.h" #include "gvt.h" @@ -504,13 +506,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } /* Always set the wanted value for vms. */ - ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1); + ret_msg_size = (((op & 0x1) == DP_AUX_I2C_READ) ? 
2 : 1); vgpu_vreg(vgpu, offset) = DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size); if (msg_length == 3) { - if (!(op & GVT_AUX_I2C_MOT)) { + if (!(op & DP_AUX_I2C_MOT)) { /* stop */ intel_vgpu_init_i2c_edid(vgpu); } else { @@ -530,7 +532,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, i2c_edid->edid_available = true; } } - } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { + } else if ((op & 0x1) == DP_AUX_I2C_WRITE) { /* TODO * We only support EDID reading from I2C_over_AUX. And * we do not expect the index mode to be used. Right now @@ -538,7 +540,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * support the gfx driver to do EDID access. */ } else { - if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ)) + if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ)) return; if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; @@ -553,7 +555,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * ACK of I2C_WRITE * returned byte if it is READ */ - aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24; + aux_data_for_write |= DP_AUX_I2C_REPLY_ACK << 24; vgpu_vreg(vgpu, offset + 4) = aux_data_for_write; } diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index c3b5a55aecb3..13fd06590929 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -42,14 +42,6 @@ struct intel_vgpu; #define EDID_SIZE 128 #define EDID_ADDR 0x50 /* Linux hvm EDID addr */ -#define GVT_AUX_NATIVE_WRITE 0x8 -#define GVT_AUX_NATIVE_READ 0x9 -#define GVT_AUX_I2C_WRITE 0x0 -#define GVT_AUX_I2C_READ 0x1 -#define GVT_AUX_I2C_STATUS 0x2 -#define GVT_AUX_I2C_MOT 0x4 -#define GVT_AUX_I2C_REPLY_ACK 0x0 - struct intel_vgpu_edid_data { bool data_valid; unsigned char edid_block[EDID_SIZE]; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 58cca4906f41..1bce1493b86f 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1190,7 +1190,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ppgtt_set_shadow_entry(spt, se, index); return 0; err: - /* Cancel the existing addess mappings of DMA addr. */ + /* Cancel the existing address mappings of DMA addr. 
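Returning to the GVT AUX-channel conversion above: the private GVT_AUX_* opcode values are dropped in favour of the standard request encoding in <drm/display/drm_dp.h> (I2C write/read are 0x0/0x1, DP_AUX_I2C_MOT is the 0x4 modifier bit, native write/read are 0x8/0x9). A small decoder using those shared constants; the function itself is hypothetical:

#include <drm/display/drm_dp.h>

static const char *example_aux_op_name(u8 request)
{
	switch (request & ~DP_AUX_I2C_MOT) {	/* MOT only modifies I2C ops */
	case DP_AUX_I2C_WRITE:		return "i2c-write";
	case DP_AUX_I2C_READ:		return "i2c-read";
	case DP_AUX_NATIVE_WRITE:	return "native-write";
	case DP_AUX_NATIVE_READ:	return "native-read";
	default:			return "other";
	}
}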
*/ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(sub_spt, &sub_se); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0f09344d3c20..9494d812c00a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -36,6 +36,8 @@ */ +#include <drm/display/drm_dp.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -1129,29 +1131,36 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value, static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd, u8 t) { - if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { + if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) { /* training pattern 1 for CR */ /* set LANE0_CR_DONE, LANE1_CR_DONE */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; /* set LANE2_CR_DONE, LANE3_CR_DONE */ - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_TRAINING_PATTERN_2) { + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE | + DP_LANE_CR_DONE << 4; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_2) { /* training pattern 2 for EQ */ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */ - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */ - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE; - dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE | + DP_LANE_CHANNEL_EQ_DONE << 4; + dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED | + DP_LANE_SYMBOL_LOCKED << 4; /* set INTERLANE_ALIGN_DONE */ - dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |= - DPCD_INTERLANE_ALIGN_DONE; - } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == - DPCD_LINK_TRAINING_DISABLED) { + dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |= + DP_INTERLANE_ALIGN_DONE; + } else if ((t & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_DISABLE) { /* finish link training */ /* set sink status as synchronized */ - dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC; + dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS | + DP_RECEIVE_PORT_1_STATUS; } } @@ -1206,7 +1215,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, len = msg & 0xff; op = ctrl >> 4; - if (op == GVT_AUX_NATIVE_WRITE) { + if (op == DP_AUX_NATIVE_WRITE) { int t; u8 buf[16]; @@ -1252,7 +1261,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, dpcd->data[p] = buf[t]; /* check for link training */ - if (p == DPCD_TRAINING_PATTERN_SET) + if (p == DP_TRAINING_PATTERN_SET) dp_aux_ch_ctl_link_training(dpcd, buf[t]); } @@ -1265,7 +1274,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, return 0; } - if (op == GVT_AUX_NATIVE_READ) { + if (op == DP_AUX_NATIVE_READ) { int idx, i, ret = 0; if ((addr + len + 1) >= DPCD_SIZE) { diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 908f910420c2..509f9ccae3a9 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -439,7 +439,7 @@ int 
intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) gvt_vgpu_err("requesting SMI service\n"); return 0; } - /* ignore non 0->1 trasitions */ + /* ignore non 0->1 transitions */ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI] & SWSCI_SCI_TRIGGER) || !(swsci & SWSCI_SCI_TRIGGER)) { diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 60a65435556d..20c3cd807488 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -167,7 +167,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, return -ENXIO; if (unlikely(vgpu->failsafe)) { - /* Remove write protection to prevent furture traps. */ + /* Remove write protection to prevent future traps. */ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a5c8005ec484..23f2cc397ec9 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1052,7 +1052,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; - /* free the unsubmited workloads in the queues. */ + /* free the unsubmitted workloads in the queues. */ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 5ec293011d99..35319228bc51 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -212,7 +212,7 @@ active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) struct i915_active_fence *active = container_of(cb, typeof(*active), cb); - return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; + return try_cmpxchg(__active_fence_slot(active), &fence, NULL); } static void diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f969f585d07b..1c2a97f593c7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -33,8 +33,6 @@ #include <linux/debugfs.h> #include <drm/drm_debugfs.h> -#include "display/intel_display_params.h" - #include "gem/i915_gem_context.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" @@ -66,7 +64,6 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) static int i915_capabilities(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_display *display = &i915->display; struct drm_printer p = drm_seq_file_printer(m); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); @@ -76,10 +73,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); - kernel_param_lock(THIS_MODULE); i915_params_dump(&i915->params, &p); - intel_display_params_dump(display, &p); - kernel_param_unlock(THIS_MODULE); return 0; } diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index a40f05b993da..365329ff8a07 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -27,6 +27,7 @@ * */ +#include <linux/aperture.h> #include <linux/acpi.h> #include <linux/device.h> #include <linux/module.h> @@ -39,7 +40,6 @@ #include <linux/vga_switcheroo.h> #include <linux/vt.h> -#include 
<drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -48,8 +48,8 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" +#include "display/intel_crtc.h" #include "display/intel_display_driver.h" -#include "display/intel_display.h" #include "display/intel_dmc.h" #include "display/intel_dp.h" #include "display/intel_dpt.h" @@ -59,7 +59,7 @@ #include "display/intel_overlay.h" #include "display/intel_pch_refclk.h" #include "display/intel_pps.h" -#include "display/intel_sprite.h" +#include "display/intel_sprite_uapi.h" #include "display/skl_watermark.h" #include "gem/i915_gem_context.h" @@ -485,7 +485,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_perf; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver); + ret = aperture_remove_conflicting_pci_devices(pdev, dev_priv->drm.driver->name); if (ret) goto err_ggtt; @@ -950,7 +950,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_dp_mst_suspend(i915); - intel_runtime_pm_disable_interrupts(i915); + intel_irq_suspend(i915); intel_hpd_cancel_work(i915); if (HAS_DISPLAY(i915)) @@ -959,7 +959,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_encoder_suspend_all(&i915->display); intel_encoder_shutdown_all(&i915->display); - intel_dmc_suspend(i915); + intel_dmc_suspend(&i915->display); i915_gem_suspend(i915); @@ -1035,7 +1035,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_dp_mst_suspend(dev_priv); - intel_runtime_pm_disable_interrupts(dev_priv); + intel_irq_suspend(dev_priv); intel_hpd_cancel_work(dev_priv); if (HAS_DISPLAY(dev_priv)) @@ -1054,7 +1054,7 @@ static int i915_drm_suspend(struct drm_device *dev) dev_priv->suspend_count++; - intel_dmc_suspend(dev_priv); + intel_dmc_suspend(display); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1164,7 +1164,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Must be called after GGTT is resumed. */ intel_dpt_resume(dev_priv); - intel_dmc_resume(dev_priv); + intel_dmc_resume(display); i915_restore_display(dev_priv); intel_pps_unlock_regs_wa(display); @@ -1181,7 +1181,7 @@ static int i915_drm_resume(struct drm_device *dev) * Modeset enabling in intel_display_driver_init_hw() also needs working * interrupts. 
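The probe-time takeover above switches from the DRM-specific drm_aperture_remove_conflicting_pci_framebuffers() to the generic helper in <linux/aperture.h>, which takes the driver name string instead of a struct drm_driver pointer. A minimal usage sketch (the wrapper function is hypothetical; the helper and its signature are the ones the hunk adopts):

#include <linux/aperture.h>

/* Evict firmware framebuffers (e.g. efifb) and generic drivers still
 * bound to this device's BARs before the native driver takes over. */
static int example_claim_gpu(struct pci_dev *pdev)
{
	return aperture_remove_conflicting_pci_devices(pdev, "i915");
}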
*/ - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); if (HAS_DISPLAY(dev_priv)) drm_mode_config_reset(dev); @@ -1481,7 +1481,7 @@ static int intel_runtime_suspend(struct device *kdev) for_each_gt(gt, dev_priv, i) intel_gt_runtime_suspend(gt); - intel_runtime_pm_disable_interrupts(dev_priv); + intel_irq_suspend(dev_priv); for_each_gt(gt, dev_priv, i) intel_uncore_suspend(gt->uncore); @@ -1494,7 +1494,7 @@ static int intel_runtime_suspend(struct device *kdev) "Runtime suspend failed, disabling it (%d)\n", ret); intel_uncore_runtime_resume(&dev_priv->uncore); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); for_each_gt(gt, dev_priv, i) intel_gt_runtime_resume(gt); @@ -1587,7 +1587,7 @@ static int intel_runtime_resume(struct device *kdev) for_each_gt(gt, dev_priv, i) intel_uncore_runtime_resume(gt->uncore); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_irq_resume(dev_priv); /* * No point of rolling back things in case of an error, as the best @@ -1725,7 +1725,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_crtc_get_pipe_from_crtc_id_ioctl, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aa0b1bfb38e0..7b1a061d92fb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -234,6 +234,7 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + bool irqs_enabled; /* Sideband mailbox protection */ struct mutex sb_lock; @@ -343,8 +344,6 @@ struct drm_i915_private { struct intel_pxp *pxp; - bool irq_enabled; - struct i915_pmu pmu; /* The TTM device structure. 
*/ @@ -508,8 +507,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915)) #define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE) #define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE) -#define IS_IVB_GT1(i915) (IS_IVYBRIDGE(i915) && \ - INTEL_INFO(i915)->gt == 1) #define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW) #define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW) #define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL) @@ -539,6 +536,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define IS_LUNARLAKE(i915) (0 && i915) #define IS_BATTLEMAGE(i915) (0 && i915) +#define IS_PANTHERLAKE(i915) (0 && i915) #define IS_ARROWLAKE_H(i915) \ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H) @@ -566,14 +564,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) #define IS_BROADWELL_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) -#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \ - INTEL_INFO(i915)->gt == 3) #define IS_HASWELL_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) -#define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \ - INTEL_INFO(i915)->gt == 3) -#define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \ - INTEL_INFO(i915)->gt == 1) /* ULX machines are also considered ULT. */ #define IS_HASWELL_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) @@ -585,31 +577,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) #define IS_KABYLAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) -#define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 4) -#define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) #define IS_COFFEELAKE_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) #define IS_COFFEELAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) -#define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) -#define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \ - INTEL_INFO(i915)->gt == 3) - #define IS_COMETLAKE_ULT(i915) \ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT) #define IS_COMETLAKE_ULX(i915) \ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \ - INTEL_INFO(i915)->gt == 2) #define IS_ICL_WITH_PORT_F(i915) \ IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) @@ -617,9 +592,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TIGERLAKE_UY(i915) \ IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY) -#define IS_LP(i915) (INTEL_INFO(i915)->is_lp) -#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915)) -#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915)) +#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) +#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915)) #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) @@ -683,7 +657,7 @@ 
IS_SUBPLATFORM(const struct drm_i915_private *i915, /* WaRsDisableCoarsePowerGating:skl,cnl */ #define NEEDS_WaRsDisableCoarsePowerGating(i915) \ - (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915)) + (IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. @@ -697,6 +671,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps) +#define HAS_PXP(i915) \ + (IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp) + #define HAS_HECI_PXP(i915) \ (INTEL_INFO(i915)->has_heci_pxp) @@ -744,7 +721,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, /* DPF == dynamic parity feature */ #define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf) -#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \ +#define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \ 2 : HAS_L3_DPF(i915)) #define HAS_GUC_DEPRIVILEGE(i915) \ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 6469b9bcf2ec..135ded17334e 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -40,8 +40,7 @@ #include <drm/drm_cache.h> #include <drm/drm_print.h> -#include "display/intel_dmc.h" -#include "display/intel_overlay.h" +#include "display/intel_display_snapshot.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" @@ -651,8 +650,6 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, struct drm_printer p = i915_error_printer(m); intel_device_info_print(&error->device_info, &error->runtime_info, &p); - intel_display_device_info_print(&error->display_device_info, - &error->display_runtime_info, &p); intel_driver_caps_print(&error->driver_caps, &p); } @@ -660,10 +657,8 @@ static void err_print_params(struct drm_i915_error_state_buf *m, const struct i915_params *params) { struct drm_printer p = i915_error_printer(m); - struct intel_display *display = &m->i915->display; i915_params_dump(params, &p); - intel_display_params_dump(display, &p); } static void err_print_pciid(struct drm_i915_error_state_buf *m, @@ -875,8 +870,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_printf(m, "IOMMU enabled?: %d\n", error->iommu); - intel_dmc_print_error_state(&p, m->i915); - err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock)); err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended)); @@ -905,11 +898,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, err_print_gt_info(m, error->gt); } - if (error->overlay) - intel_overlay_print_error_state(&p, error->overlay); - err_print_capabilities(m, error); err_print_params(m, &error->params); + + intel_display_snapshot_print(error->display_snapshot, &p); } static int err_print_to_sgl(struct i915_gpu_coredump *error) @@ -1032,7 +1024,6 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) static void cleanup_params(struct i915_gpu_coredump *error) { i915_params_free(&error->params); - intel_display_params_free(&error->display_params); } static void cleanup_uc(struct intel_uc_coredump *uc) @@ -1077,7 +1068,7 @@ void __i915_gpu_coredump_free(struct kref *error_ref) cleanup_gt(gt); } - kfree(error->overlay); + intel_display_snapshot_free(error->display_snapshot); cleanup_params(error); @@ -1993,17 +1984,12 @@ static void capture_gen(struct i915_gpu_coredump *error) 
error->suspend_count = i915->suspend_count; i915_params_copy(&error->params, &i915->params); - intel_display_params_copy(&error->display_params); memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); memcpy(&error->runtime_info, RUNTIME_INFO(i915), sizeof(error->runtime_info)); - memcpy(&error->display_device_info, DISPLAY_INFO(i915), - sizeof(error->display_device_info)); - memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915), - sizeof(error->display_runtime_info)); error->driver_caps = i915->caps; } @@ -2097,6 +2083,7 @@ static struct i915_gpu_coredump * __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) { struct drm_i915_private *i915 = gt->i915; + struct intel_display *display = &i915->display; struct i915_gpu_coredump *error; /* Check if GPU capture has been disabled */ @@ -2138,7 +2125,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du error->simulated |= error->gt->simulated; } - error->overlay = intel_overlay_capture_error_state(i915); + error->display_snapshot = intel_display_snapshot_capture(display); return error; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 7c255bb1c319..78a8928562a9 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -14,8 +14,6 @@ #include <drm/drm_mm.h> -#include "display/intel_display_device.h" -#include "display/intel_display_params.h" #include "gt/intel_engine.h" #include "gt/intel_engine_types.h" #include "gt/intel_gt_types.h" @@ -31,7 +29,7 @@ struct drm_i915_private; struct i915_vma_compress; struct intel_engine_capture_vma; -struct intel_overlay_error_state; +struct intel_display_snapshot; struct i915_vma_coredump { struct i915_vma_coredump *next; @@ -212,15 +210,12 @@ struct i915_gpu_coredump { struct intel_device_info device_info; struct intel_runtime_info runtime_info; - struct intel_display_device_info display_device_info; - struct intel_display_runtime_info display_runtime_info; struct intel_driver_caps driver_caps; struct i915_params params; - struct intel_display_params display_params; - - struct intel_overlay_error_state *overlay; struct scatterlist *sgl, *fit; + + struct intel_display_snapshot *display_snapshot; }; struct i915_gpu_error { diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 17d30f6b84b0..7dfe1784153f 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -7,6 +7,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/types.h> +#include <linux/units.h> #include "i915_drv.h" #include "i915_hwmon.h" @@ -32,6 +33,7 @@ struct hwm_reg { i915_reg_t gt_perf_status; + i915_reg_t pkg_temp; i915_reg_t pkg_power_sku_unit; i915_reg_t pkg_power_sku; i915_reg_t pkg_rapl_limit; @@ -280,6 +282,7 @@ static const struct attribute_group *hwm_groups[] = { }; static const struct hwmon_channel_info * const hwm_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT), HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT), @@ -311,6 +314,37 @@ static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval) } static umode_t +hwm_temp_is_visible(const struct hwm_drvdata *ddat, u32 attr) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + + if (attr == hwmon_temp_input && i915_mmio_reg_valid(hwmon->rg.pkg_temp)) + return 0444; + + return 0; +} + +static int 
+hwm_temp_read(struct hwm_drvdata *ddat, u32 attr, long *val) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + intel_wakeref_t wakeref; + u32 reg_val; + + switch (attr) { + case hwmon_temp_input: + with_intel_runtime_pm(ddat->uncore->rpm, wakeref) + reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_temp); + + /* HW register value is in degrees Celsius, convert to millidegrees. */ + *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static umode_t hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr) { struct drm_i915_private *i915 = ddat->uncore->i915; @@ -692,6 +726,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type, struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata; switch (type) { + case hwmon_temp: + return hwm_temp_is_visible(ddat, attr); case hwmon_in: return hwm_in_is_visible(ddat, attr); case hwmon_power: @@ -714,6 +750,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, struct hwm_drvdata *ddat = dev_get_drvdata(dev); switch (type) { + case hwmon_temp: + return hwm_temp_read(ddat, attr, val); case hwmon_in: return hwm_in_read(ddat, attr, val); case hwmon_power: @@ -810,6 +848,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.gt_perf_status = GEN12_RPSTAT1; if (IS_DG1(i915) || IS_DG2(i915)) { + hwmon->rg.pkg_temp = PCU_PACKAGE_TEMPERATURE; hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT; hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU; hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT; @@ -817,6 +856,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.energy_status_tile = INVALID_MMIO_REG; hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED; } else { + hwmon->rg.pkg_temp = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2321de48d169..f75cbf5b8a1c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -77,39 +77,24 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915, WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); } -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier) +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs) { - intel_uncore_write(uncore, imr, 0xffffffff); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.imr, 0xffffffff); + intel_uncore_posting_read(uncore, regs.imr); - intel_uncore_write(uncore, ier, 0); + intel_uncore_write(uncore, regs.ier, 0); /* IIR can theoretically queue up two events. Be paranoid. */ - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); -} - -static void gen2_irq_reset(struct intel_uncore *uncore) -{ - intel_uncore_write16(uncore, GEN2_IMR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IMR); - - intel_uncore_write16(uncore, GEN2_IER, 0); - - /* IIR can theoretically queue up two events. Be paranoid. 
*/ - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); } /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. */ -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) { u32 val = intel_uncore_read(uncore, reg); @@ -125,42 +110,14 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_posting_read(uncore, reg); } -static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) -{ - u16 val = intel_uncore_read16(uncore, GEN2_IIR); - - if (val == 0) - return; - - drm_WARN(&uncore->i915->drm, 1, - "Interrupt register 0x%x is not zero: 0x%08x\n", - i915_mmio_reg_offset(GEN2_IIR), val); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); - intel_uncore_write16(uncore, GEN2_IIR, 0xffff); - intel_uncore_posting_read16(uncore, GEN2_IIR); -} - -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir) -{ - gen3_assert_iir_is_zero(uncore, iir); - - intel_uncore_write(uncore, ier, ier_val); - intel_uncore_write(uncore, imr, imr_val); - intel_uncore_posting_read(uncore, imr); -} - -static void gen2_irq_init(struct intel_uncore *uncore, - u32 imr_val, u32 ier_val) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val) { - gen2_assert_iir_is_zero(uncore); + gen2_assert_iir_is_zero(uncore, regs.iir); - intel_uncore_write16(uncore, GEN2_IER, ier_val); - intel_uncore_write16(uncore, GEN2_IMR, imr_val); - intel_uncore_posting_read16(uncore, GEN2_IMR); + intel_uncore_write(uncore, regs.ier, ier_val); + intel_uncore_write(uncore, regs.imr, imr_val); + intel_uncore_posting_read(uncore, regs.imr); } /** @@ -298,7 +255,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | @@ -380,7 +337,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | @@ -665,7 +622,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_NOP(dev_priv)) return; - GEN3_IRQ_RESET(uncore, SDE); + gen2_irq_reset(uncore, SDE_IRQ_REGS); if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); @@ -677,7 +634,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - GEN3_IRQ_RESET(uncore, DE); + gen2_irq_reset(uncore, DE_IRQ_REGS); dev_priv->irq_mask = ~0u; if (GRAPHICS_VER(dev_priv) == 7) @@ -714,7 +671,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) gen8_gt_irq_reset(to_gt(dev_priv)); gen8_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, 
GEN8_PCU_); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); @@ -731,8 +688,8 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); } static void dg1_irq_reset(struct drm_i915_private *dev_priv) @@ -748,8 +705,8 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) gen11_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0); } @@ -763,7 +720,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) gen8_gt_irq_reset(to_gt(dev_priv)); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); + gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.irq.display_irqs_enabled) @@ -808,7 +765,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) gen11_gt_irq_postinstall(gt); gen11_de_irq_postinstall(dev_priv); - GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); + gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); gen11_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); @@ -824,7 +781,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) for_each_gt(gt, dev_priv, i) gen11_gt_irq_postinstall(gt); - GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); + gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); dg1_de_irq_postinstall(dev_priv); @@ -845,16 +802,6 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); } -static void i8xx_irq_reset(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - i9xx_pipestat_irq_reset(dev_priv); - - gen2_irq_reset(uncore); - dev_priv->irq_mask = ~0u; -} - static u32 i9xx_error_mask(struct drm_i915_private *i915) { /* @@ -876,76 +823,6 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915) I915_ERROR_MEMORY_REFRESH); } -static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u16 enable_mask; - - intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); - - /* Unmask the interrupts that we always want on. */ - dev_priv->irq_mask = - ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT); - - enable_mask = - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_MASTER_ERROR_INTERRUPT | - I915_USER_INTERRUPT; - - gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask); - - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. 
*/ - spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void i8xx_error_irq_ack(struct drm_i915_private *i915, - u16 *eir, u16 *eir_stuck) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 emr; - - *eir = intel_uncore_read16(uncore, EIR); - intel_uncore_write16(uncore, EIR, *eir); - - *eir_stuck = intel_uncore_read16(uncore, EIR); - if (*eir_stuck == 0) - return; - - /* - * Toggle all EMR bits to make sure we get an edge - * in the ISR master error bit if we don't clear - * all the EIR bits. Otherwise the edge triggered - * IIR on i965/g4x wouldn't notice that an interrupt - * is still pending. Also some EIR bits can't be - * cleared except by handling the underlying error - * (or by a GPU reset) so we mask any bit that - * remains set. - */ - emr = intel_uncore_read16(uncore, EMR); - intel_uncore_write16(uncore, EMR, 0xffff); - intel_uncore_write16(uncore, EMR, emr | *eir_stuck); -} - -static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, - u16 eir, u16 eir_stuck) -{ - drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir); - - if (eir_stuck) - drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", - eir_stuck); - - drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", - intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); -} - static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, u32 *eir, u32 *eir_stuck) { @@ -986,66 +863,13 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } -static irqreturn_t i8xx_irq_handler(int irq, void *arg) -{ - struct drm_i915_private *dev_priv = arg; - irqreturn_t ret = IRQ_NONE; - - if (!intel_irqs_enabled(dev_priv)) - return IRQ_NONE; - - /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - - do { - u32 pipe_stats[I915_MAX_PIPES] = {}; - u16 eir = 0, eir_stuck = 0; - u16 iir; - - iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); - if (iir == 0) - break; - - ret = IRQ_HANDLED; - - /* Call regardless, as some status bits might not be - * signalled in iir */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); - - if (iir & I915_MASTER_ERROR_INTERRUPT) - i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); - - intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); - - if (iir & I915_USER_INTERRUPT) - intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); - - if (iir & I915_MASTER_ERROR_INTERRUPT) - i8xx_error_irq_handler(dev_priv, eir, eir_stuck); - - i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); - } while (0); - - pmu_irq_stats(dev_priv, ret); - - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - - return ret; -} - static void i915_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - if (I915_HAS_HOTPLUG(dev_priv)) { - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - intel_uncore_rmw(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv), 0, 0); - } - - i9xx_pipestat_irq_reset(dev_priv); + i9xx_display_irq_reset(dev_priv); - GEN3_IRQ_RESET(uncore, GEN2_); + gen2_irq_reset(uncore, GEN2_IRQ_REGS); dev_priv->irq_mask = ~0u; } @@ -1056,28 +880,28 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); - /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = - ~(I915_ASLE_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_MASTER_ERROR_INTERRUPT); enable_mask = - I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; + if (DISPLAY_VER(dev_priv) >= 3) { + dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; + enable_mask |= I915_ASLE_INTERRUPT; + } + if (I915_HAS_HOTPLUG(dev_priv)) { - /* Enable in IER... */ - enable_mask |= I915_DISPLAY_PORT_INTERRUPT; - /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } - GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -1117,7 +941,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) @@ -1148,12 +972,9 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0); + i9xx_display_irq_reset(dev_priv); - i9xx_pipestat_irq_reset(dev_priv); - - GEN3_IRQ_RESET(uncore, GEN2_); + gen2_irq_reset(uncore, GEN2_IRQ_REGS); dev_priv->irq_mask = ~0u; } @@ -1183,7 +1004,6 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); - /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PORT_INTERRUPT | @@ -1202,7 +1022,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; - GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
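GEN3_IRQ_INIT() gets the same treatment as the reset path. A sketch of gen2_irq_init(), assuming the gen3_irq_init() ordering is kept: sanity-check that IIR is idle, program IER, then unmask via IMR with a posting read:

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}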
*/ @@ -1242,7 +1062,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) hotplug_status = i9xx_hpd_irq_ack(dev_priv); /* Call regardless, as some status bits might not be - * signalled in iir */ + * signalled in IIR */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) @@ -1317,10 +1137,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) return valleyview_irq_handler; else if (GRAPHICS_VER(dev_priv) == 4) return i965_irq_handler; - else if (GRAPHICS_VER(dev_priv) == 3) - return i915_irq_handler; else - return i8xx_irq_handler; + return i915_irq_handler; } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) return dg1_irq_handler; @@ -1342,10 +1160,8 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv) valleyview_irq_reset(dev_priv); else if (GRAPHICS_VER(dev_priv) == 4) i965_irq_reset(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 3) - i915_irq_reset(dev_priv); else - i8xx_irq_reset(dev_priv); + i915_irq_reset(dev_priv); } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) dg1_irq_reset(dev_priv); @@ -1367,10 +1183,8 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv) valleyview_irq_postinstall(dev_priv); else if (GRAPHICS_VER(dev_priv) == 4) i965_irq_postinstall(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 3) - i915_irq_postinstall(dev_priv); else - i8xx_irq_postinstall(dev_priv); + i915_irq_postinstall(dev_priv); } else { if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10)) dg1_irq_postinstall(dev_priv); @@ -1404,16 +1218,14 @@ int intel_irq_install(struct drm_i915_private *dev_priv) * interrupts as enabled _before_ actually enabling them to avoid * special cases in our ordering checks. */ - dev_priv->runtime_pm.irqs_enabled = true; - - dev_priv->irq_enabled = true; + dev_priv->irqs_enabled = true; intel_irq_reset(dev_priv); ret = request_irq(irq, intel_irq_handler(dev_priv), IRQF_SHARED, DRIVER_NAME, dev_priv); if (ret < 0) { - dev_priv->irq_enabled = false; + dev_priv->irqs_enabled = false; return ret; } @@ -1433,56 +1245,46 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { int irq = to_pci_dev(dev_priv->drm.dev)->irq; - /* - * FIXME we can get called twice during driver probe - * error handling as well as during driver remove due to - * intel_display_driver_remove() calling us out of sequence. - * Would be nice if it didn't do that... - */ - if (!dev_priv->irq_enabled) + if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled)) return; - dev_priv->irq_enabled = false; - intel_irq_reset(dev_priv); free_irq(irq, dev_priv); intel_hpd_cancel_work(dev_priv); - dev_priv->runtime_pm.irqs_enabled = false; + dev_priv->irqs_enabled = false; } /** - * intel_runtime_pm_disable_interrupts - runtime interrupt disabling - * @dev_priv: i915 device instance + * intel_irq_suspend - Suspend interrupts + * @i915: i915 device instance * - * This function is used to disable interrupts at runtime, both in the runtime - * pm and the system suspend/resume code. + * This function is used to disable interrupts at runtime. 
*/ -void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) +void intel_irq_suspend(struct drm_i915_private *i915) { - intel_irq_reset(dev_priv); - dev_priv->runtime_pm.irqs_enabled = false; - intel_synchronize_irq(dev_priv); + intel_irq_reset(i915); + i915->irqs_enabled = false; + intel_synchronize_irq(i915); } /** - * intel_runtime_pm_enable_interrupts - runtime interrupt enabling - * @dev_priv: i915 device instance + * intel_irq_resume - Resume interrupts + * @i915: i915 device instance * - * This function is used to enable interrupts at runtime, both in the runtime - * pm and the system suspend/resume code. + * This function is used to enable interrupts at runtime. */ -void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) +void intel_irq_resume(struct drm_i915_private *i915) { - dev_priv->runtime_pm.irqs_enabled = true; - intel_irq_reset(dev_priv); - intel_irq_postinstall(dev_priv); + i915->irqs_enabled = true; + intel_irq_reset(i915); + intel_irq_postinstall(i915); } bool intel_irqs_enabled(struct drm_i915_private *dev_priv) { - return dev_priv->runtime_pm.irqs_enabled; + return dev_priv->irqs_enabled; } void intel_synchronize_irq(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index e665a1b007dc..0457f6402e05 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -34,45 +34,17 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask); -void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); -void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); +void intel_irq_suspend(struct drm_i915_private *i915); +void intel_irq_resume(struct drm_i915_private *i915); bool intel_irqs_enabled(struct drm_i915_private *dev_priv); void intel_synchronize_irq(struct drm_i915_private *i915); void intel_synchronize_hardirq(struct drm_i915_private *i915); -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg); +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg); -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier); +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs); -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir); - -#define GEN8_IRQ_RESET_NDX(uncore, type, which) \ -({ \ - unsigned int which_ = which; \ - gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \ - GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \ -}) - -#define GEN3_IRQ_RESET(uncore, type) \ - gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER) - -#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \ -({ \ - unsigned int which_ = which; \ - gen3_irq_init((uncore), \ - GEN8_##type##_IMR(which_), imr_val, \ - GEN8_##type##_IER(which_), ier_val, \ - GEN8_##type##_IIR(which_)); \ -}) - -#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \ - gen3_irq_init((uncore), \ - type##IMR, imr_val, \ - type##IER, ier_val, \ - type##IIR) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val); #endif /* __I915_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d37bb3a704d0..21006c7f615c 100644 --- 
a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -24,7 +24,7 @@ #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include "display/intel_display_driver.h" #include "gt/intel_gt_regs.h" @@ -367,7 +367,6 @@ static const struct intel_device_info ivb_q_info = { static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), - .is_lp = 1, .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -451,7 +450,6 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .is_lp = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, @@ -512,7 +510,6 @@ static const struct intel_device_info skl_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ - .is_lp = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ @@ -870,6 +867,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_info), INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_info), INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_info), + INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_info), INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_info), {} }; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 21eb0c5b320d..93fbf53578da 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -356,7 +356,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915) return GRAPHICS_VER(i915) == 7; } -static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) { struct intel_engine_pmu *pmu = &engine->pmu; bool busy; @@ -391,6 +391,31 @@ static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); } +static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + u32 tail, head, acthd; + + tail = ENGINE_READ_FW(engine, RING_TAIL); + head = ENGINE_READ_FW(engine, RING_HEAD); + acthd = ENGINE_READ_FW(engine, ACTHD); + + if (head & HEAD_WAIT_I8XX) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); + + if (head & HEAD_WAIT_I8XX || head != acthd || + (head & HEAD_ADDR) != (tail & TAIL_ADDR)) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} + +static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + if (GRAPHICS_VER(engine->i915) >= 3) + gen3_engine_sample(engine, period_ns); + else + gen2_engine_sample(engine, period_ns); +} + static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { @@ -834,15 +859,14 @@ static void i915_pmu_event_start(struct perf_event *event, int flags) static void i915_pmu_event_stop(struct perf_event *event, int flags) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); if (pmu->closed) goto out; if (flags & PERF_EF_UPDATE) i915_pmu_event_read(event); + i915_pmu_disable(event); out: @@ -1232,17 +1256,6 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); } -static bool is_igp(struct drm_i915_private *i915) 
-{ - struct pci_dev *pdev = to_pci_dev(i915->drm.dev); - - /* IGP is 0000:00:02.0 */ - return pci_domain_nr(pdev->bus) == 0 && - pdev->bus->number == 0 && - PCI_SLOT(pdev->devfn) == 2 && - PCI_FUNC(pdev->devfn) == 0; -} - void i915_pmu_register(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; @@ -1255,18 +1268,13 @@ void i915_pmu_register(struct drm_i915_private *i915) int ret = -ENOMEM; - if (GRAPHICS_VER(i915) <= 2) { - drm_info(&i915->drm, "PMU not supported for this GPU."); - return; - } - spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; pmu->cpuhp.cpu = -1; init_rc6(pmu); - if (!is_igp(i915)) { + if (IS_DGFX(i915)) { pmu->name = kasprintf(GFP_KERNEL, "i915_%s", dev_name(i915->drm.dev)); @@ -1318,7 +1326,7 @@ err_attr: pmu->base.event_init = NULL; free_event_attributes(pmu); err_name: - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); err: drm_notice(&i915->drm, "Failed to register PMU!\n"); @@ -1346,7 +1354,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) perf_pmu_unregister(&pmu->base); pmu->base.event_init = NULL; kfree(pmu->base.attr_groups); - if (!is_igp(i915)) + if (IS_DGFX(i915)) kfree(pmu->name); free_event_attributes(pmu); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 41f4350a7c6c..22be4a731d27 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -422,6 +422,11 @@ #define GEN2_IIR _MMIO(0x20a4) #define GEN2_IMR _MMIO(0x20a8) #define GEN2_ISR _MMIO(0x20ac) + +#define GEN2_IRQ_REGS I915_IRQ_REGS(GEN2_IMR, \ + GEN2_IER, \ + GEN2_IIR) + #define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060) #define GINT_DIS (1 << 22) #define GCFG_DIS (1 << 8) @@ -434,6 +439,10 @@ #define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120) #define VLV_PCBR_ADDR_SHIFT 12 +#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \ + VLV_IER, \ + VLV_IIR) + #define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */ #define EIR _MMIO(0x20b0) #define EMR _MMIO(0x20b4) @@ -1071,87 +1080,77 @@ /* Pipe/transcoder A timing regs */ #define _TRANS_HTOTAL_A 0x60000 +#define _TRANS_HTOTAL_B 0x61000 +#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A) #define HTOTAL_MASK REG_GENMASK(31, 16) #define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal)) #define HACTIVE_MASK REG_GENMASK(15, 0) #define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay)) + #define _TRANS_HBLANK_A 0x60004 +#define _TRANS_HBLANK_B 0x61004 +#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A) #define HBLANK_END_MASK REG_GENMASK(31, 16) #define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end)) #define HBLANK_START_MASK REG_GENMASK(15, 0) #define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start)) + #define _TRANS_HSYNC_A 0x60008 +#define _TRANS_HSYNC_B 0x61008 +#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A) #define HSYNC_END_MASK REG_GENMASK(31, 16) #define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end)) #define HSYNC_START_MASK REG_GENMASK(15, 0) #define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start)) + #define _TRANS_VTOTAL_A 0x6000c +#define _TRANS_VTOTAL_B 0x6100c +#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A) #define VTOTAL_MASK REG_GENMASK(31, 16) #define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, 
(vtotal)) #define VACTIVE_MASK REG_GENMASK(15, 0) #define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay)) + #define _TRANS_VBLANK_A 0x60010 +#define _TRANS_VBLANK_B 0x61010 +#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A) #define VBLANK_END_MASK REG_GENMASK(31, 16) #define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end)) #define VBLANK_START_MASK REG_GENMASK(15, 0) #define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start)) + #define _TRANS_VSYNC_A 0x60014 +#define _TRANS_VSYNC_B 0x61014 +#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A) #define VSYNC_END_MASK REG_GENMASK(31, 16) #define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end)) #define VSYNC_START_MASK REG_GENMASK(15, 0) #define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start)) -#define _TRANS_EXITLINE_A 0x60018 + #define _PIPEASRC 0x6001c +#define _PIPEBSRC 0x6101c +#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC) #define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) #define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) #define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) #define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) -#define _BCLRPAT_A 0x60020 -#define _TRANS_VSYNCSHIFT_A 0x60028 -#define _TRANS_MULT_A 0x6002c -/* Pipe/transcoder B timing regs */ -#define _TRANS_HTOTAL_B 0x61000 -#define _TRANS_HBLANK_B 0x61004 -#define _TRANS_HSYNC_B 0x61008 -#define _TRANS_VTOTAL_B 0x6100c -#define _TRANS_VBLANK_B 0x61010 -#define _TRANS_VSYNC_B 0x61014 -#define _PIPEBSRC 0x6101c +#define _BCLRPAT_A 0x60020 #define _BCLRPAT_B 0x61020 -#define _TRANS_VSYNCSHIFT_B 0x61028 -#define _TRANS_MULT_B 0x6102c - -/* DSI 0 timing regs */ -#define _TRANS_HTOTAL_DSI0 0x6b000 -#define _TRANS_HSYNC_DSI0 0x6b008 -#define _TRANS_VTOTAL_DSI0 0x6b00c -#define _TRANS_VSYNC_DSI0 0x6b014 -#define _TRANS_VSYNCSHIFT_DSI0 0x6b028 - -/* DSI 1 timing regs */ -#define _TRANS_HTOTAL_DSI1 0x6b800 -#define _TRANS_HSYNC_DSI1 0x6b808 -#define _TRANS_VTOTAL_DSI1 0x6b80c -#define _TRANS_VSYNC_DSI1 0x6b814 -#define _TRANS_VSYNCSHIFT_DSI1 0x6b828 - -#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A) -#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A) -#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A) -#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A) -#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A) -#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A) #define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A) + +#define _TRANS_VSYNCSHIFT_A 0x60028 +#define _TRANS_VSYNCSHIFT_B 0x61028 #define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A) -#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC) + +#define _TRANS_MULT_A 0x6002c +#define _TRANS_MULT_B 0x6102c #define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A) /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) #define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100) - #define ADPA_DAC_ENABLE (1 << 31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SEL_SHIFT 30 @@ -1195,7 +1194,6 @@ #define ADPA_DPMS_STANDBY (2 << 10) #define ADPA_DPMS_OFF (3 << 10) - /* Hotplug control (945+ only) */ 
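The timing registers now sit next to the _MMIO_TRANS2() accessors that rebase their transcoder-A offsets per device. A hypothetical read helper showing the intended use (note the hardware fields store active/total minus one):

static int example_read_hactive(struct drm_i915_private *i915,
				enum transcoder trans)
{
	u32 htotal = intel_uncore_read(&i915->uncore,
				       TRANS_HTOTAL(i915, trans));

	/* HACTIVE is programmed as hdisplay - 1 */
	return REG_FIELD_GET(HACTIVE_MASK, htotal) + 1;
}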
#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110) #define PORTB_HOTPLUG_INT_EN (1 << 29) @@ -1446,11 +1444,9 @@ #define DP_B _MMIO(0x64100) #define DP_C _MMIO(0x64200) #define DP_D _MMIO(0x64300) - #define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100) #define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200) #define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300) - #define DP_PORT_EN (1 << 31) #define DP_PIPE_SEL_SHIFT 30 #define DP_PIPE_SEL_MASK (1 << 30) @@ -1549,16 +1545,16 @@ */ #define _PIPEA_DATA_M_G4X 0x70050 #define _PIPEB_DATA_M_G4X 0x71050 - +#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ #define TU_SIZE_MASK REG_GENMASK(30, 25) #define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */ - #define DATA_LINK_M_N_MASK REG_GENMASK(23, 0) #define DATA_LINK_N_MAX (0x800000) #define _PIPEA_DATA_N_G4X 0x70054 #define _PIPEB_DATA_N_G4X 0x71054 +#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) /* * Computing Link M and N values for the Display Port link @@ -1570,22 +1566,22 @@ * The Link value is transmitted in the Main Stream * Attributes and VB-ID. */ - #define _PIPEA_LINK_M_G4X 0x70060 #define _PIPEB_LINK_M_G4X 0x71060 +#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) + #define _PIPEA_LINK_N_G4X 0x70064 #define _PIPEB_LINK_N_G4X 0x71064 - -#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) -#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) -#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X) #define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X) /* Pipe A */ #define _PIPEADSL 0x70000 +#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL) #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) + #define _TRANSACONF 0x70008 +#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF) #define TRANSCONF_ENABLE REG_BIT(31) #define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ #define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */ @@ -1645,6 +1641,7 @@ #define TRANSCONF_PIXEL_COUNT_SCALING_X4 1 #define _PIPEASTAT 0x70024 +#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT) #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) #define PIPE_CRC_ERROR_ENABLE (1UL << 29) @@ -1691,15 +1688,8 @@ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1) #define PIPE_HBLANK_INT_STATUS (1UL << 0) #define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0) - -#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 -#define PIPESTAT_INT_STATUS_MASK 0x0000ffff - -#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF) -#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL) -#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH) -#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL) -#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT) +#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 +#define PIPESTAT_INT_STATUS_MASK 0x0000ffff #define _PIPE_ARB_CTL_A 0x70028 /* icl+ */ #define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A) @@ -1707,6 +1697,7 @@ #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 +#define 
PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) #define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ #define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ #define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ @@ -1734,23 +1725,15 @@ #define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1) #define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2) #define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3) -#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) #define _PIPE_MISC2_A 0x7002C #define _PIPE_MISC2_B 0x7102C +#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) #define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) #define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */ #define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id)) -#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) - -#define _ICL_PIPE_A_STATUS 0x70058 -#define ICL_PIPESTATUS(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS) -#define PIPE_STATUS_UNDERRUN REG_BIT(31) -#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28) -#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27) -#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) @@ -2066,33 +2049,38 @@ * frame = (high1 << 8) | low1; */ #define _PIPEAFRAMEHIGH 0x70040 +#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH) #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 + #define _PIPEAFRAMEPIXEL 0x70044 +#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL) #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 + /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_G4X 0x70040 -#define _PIPEA_FLIPCOUNT_G4X 0x70044 #define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X) + +#define _PIPEA_FLIPCOUNT_G4X 0x70044 #define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X) /* CHV pipe B blender */ #define _CHV_BLEND_A 0x60a00 +#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A) #define CHV_BLEND_MASK REG_GENMASK(31, 30) #define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0) #define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1) #define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2) + #define _CHV_CANVAS_A 0x60a04 +#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A) #define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20) #define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10) #define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0) -#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A) -#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A) - /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) #define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK) @@ -2114,11 +2102,6 @@ #define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4) #define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4) -/* ICL DSI 0 and 
1 */ -#define _PIPEDSI0CONF 0x7b008 -#define _PIPEDSI1CONF 0x7b808 - - /* VBIOS regs */ #define VGACNTRL _MMIO(0x71400) # define VGA_DISP_DISABLE (1 << 31) @@ -2156,38 +2139,42 @@ # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) #define _PIPEA_DATA_M1 0x60030 -#define _PIPEA_DATA_N1 0x60034 -#define _PIPEA_DATA_M2 0x60038 -#define _PIPEA_DATA_N2 0x6003c -#define _PIPEA_LINK_M1 0x60040 -#define _PIPEA_LINK_N1 0x60044 -#define _PIPEA_LINK_M2 0x60048 -#define _PIPEA_LINK_N2 0x6004c - -/* PIPEB timing regs are same start from 0x61000 */ - #define _PIPEB_DATA_M1 0x61030 -#define _PIPEB_DATA_N1 0x61034 -#define _PIPEB_DATA_M2 0x61038 -#define _PIPEB_DATA_N2 0x6103c -#define _PIPEB_LINK_M1 0x61040 -#define _PIPEB_LINK_N1 0x61044 -#define _PIPEB_LINK_M2 0x61048 -#define _PIPEB_LINK_N2 0x6104c - #define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1) + +#define _PIPEA_DATA_N1 0x60034 +#define _PIPEB_DATA_N1 0x61034 #define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1) + +#define _PIPEA_DATA_M2 0x60038 +#define _PIPEB_DATA_M2 0x61038 #define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2) + +#define _PIPEA_DATA_N2 0x6003c +#define _PIPEB_DATA_N2 0x6103c #define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2) + +#define _PIPEA_LINK_M1 0x60040 +#define _PIPEB_LINK_M1 0x61040 #define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1) + +#define _PIPEA_LINK_N1 0x60044 +#define _PIPEB_LINK_N1 0x61044 #define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1) + +#define _PIPEA_LINK_M2 0x60048 +#define _PIPEB_LINK_M2 0x61048 #define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2) + +#define _PIPEA_LINK_N2 0x6004c +#define _PIPEB_LINK_N2 0x6104c #define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 +#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) #define PF_ENABLE REG_BIT(31) #define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */ #define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe)) @@ -2196,37 +2183,43 @@ #define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1) #define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2) #define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3) + #define _PFA_WIN_SZ 0x68074 #define _PFB_WIN_SZ 0x68874 +#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) #define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16) #define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w)) #define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0) #define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h)) + #define _PFA_WIN_POS 0x68070 #define _PFB_WIN_POS 0x68870 +#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) #define PF_WIN_XPOS_MASK REG_GENMASK(31, 16) #define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x)) #define PF_WIN_YPOS_MASK REG_GENMASK(15, 0) #define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y)) + #define _PFA_VSCALE 0x68084 #define _PFB_VSCALE 0x68884 +#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) + #define _PFA_HSCALE 0x68090 #define _PFB_HSCALE 0x68890 - -#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) -#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) -#define PF_WIN_POS(pipe) 
_MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) -#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) #define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) /* * Skylake scalers */ +#define _ID(id, a, b) _PICK_EVEN(id, a, b) #define _PS_1A_CTRL 0x68180 #define _PS_2A_CTRL 0x68280 #define _PS_1B_CTRL 0x68980 #define _PS_2B_CTRL 0x68A80 #define _PS_1C_CTRL 0x69180 +#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ + _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) #define PS_SCALER_EN REG_BIT(31) #define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */ #define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0) @@ -2279,6 +2272,9 @@ #define _PS_PWR_GATE_1B 0x68960 #define _PS_PWR_GATE_2B 0x68A60 #define _PS_PWR_GATE_1C 0x69160 +#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ + _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) #define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31) #define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3) #define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0) @@ -2296,6 +2292,9 @@ #define _PS_WIN_POS_1B 0x68970 #define _PS_WIN_POS_2B 0x68A70 #define _PS_WIN_POS_1C 0x69170 +#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ + _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) #define PS_WIN_XPOS_MASK REG_GENMASK(31, 16) #define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x)) #define PS_WIN_YPOS_MASK REG_GENMASK(15, 0) @@ -2306,6 +2305,9 @@ #define _PS_WIN_SZ_1B 0x68974 #define _PS_WIN_SZ_2B 0x68A74 #define _PS_WIN_SZ_1C 0x69174 +#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ + _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) #define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16) #define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w)) #define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0) @@ -2316,18 +2318,27 @@ #define _PS_VSCALE_1B 0x68984 #define _PS_VSCALE_2B 0x68A84 #define _PS_VSCALE_1C 0x69184 +#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ + _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) #define _PS_HSCALE_1A 0x68190 #define _PS_HSCALE_2A 0x68290 #define _PS_HSCALE_1B 0x68990 #define _PS_HSCALE_2B 0x68A90 #define _PS_HSCALE_1C 0x69190 +#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ + _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) #define _PS_VPHASE_1A 0x68188 #define _PS_VPHASE_2A 0x68288 #define _PS_VPHASE_1B 0x68988 #define _PS_VPHASE_2B 0x68A88 #define _PS_VPHASE_1C 0x69188 +#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ + _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) #define PS_Y_PHASE_MASK REG_GENMASK(31, 16) #define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x)) #define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0) @@ -2340,56 +2351,32 @@ #define _PS_HPHASE_1B 0x68994 #define _PS_HPHASE_2B 0x68A94 #define _PS_HPHASE_1C 0x69194 +#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ + _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) #define _PS_ECC_STAT_1A 0x681D0 #define _PS_ECC_STAT_2A 0x682D0 #define _PS_ECC_STAT_1B 0x689D0 #define _PS_ECC_STAT_2B 0x68AD0 #define _PS_ECC_STAT_1C 0x691D0 +#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ + _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ + _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) #define _PS_COEF_SET0_INDEX_1A 0x68198 #define _PS_COEF_SET0_INDEX_2A 0x68298 #define 
_PS_COEF_SET0_INDEX_1B 0x68998 #define _PS_COEF_SET0_INDEX_2B 0x68A98 +#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \ + _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \ + _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8) #define PS_COEF_INDEX_AUTO_INC REG_BIT(10) #define _PS_COEF_SET0_DATA_1A 0x6819C #define _PS_COEF_SET0_DATA_2A 0x6829C #define _PS_COEF_SET0_DATA_1B 0x6899C #define _PS_COEF_SET0_DATA_2B 0x68A9C - -#define _ID(id, a, b) _PICK_EVEN(id, a, b) -#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ - _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) -#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \ - _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B)) -#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \ - _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B)) -#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \ - _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B)) -#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \ - _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B)) -#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \ - _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B)) -#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \ - _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B)) -#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \ - _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B)) -#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ - _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ - _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) -#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \ - _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \ - _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8) - #define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \ _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \ _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8) @@ -2459,11 +2446,19 @@ #define DEIIR _MMIO(0x44008) #define DEIER _MMIO(0x4400c) +#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \ + DEIER, \ + DEIIR) + #define GTISR _MMIO(0x44010) #define GTIMR _MMIO(0x44014) #define GTIIR _MMIO(0x44018) #define GTIER _MMIO(0x4401c) +#define GT_IRQ_REGS I915_IRQ_REGS(GTIMR, \ + GTIER, \ + GTIIR) + #define GEN8_MASTER_IRQ _MMIO(0x44200) #define GEN8_MASTER_IRQ_CONTROL (1 << 31) #define GEN8_PCU_IRQ (1 << 30) @@ -2489,6 +2484,10 @@ #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) +#define GEN8_GT_IRQ_REGS(which) I915_IRQ_REGS(GEN8_GT_IMR(which), \ + GEN8_GT_IER(which), \ + GEN8_GT_IIR(which)) + #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 #define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! 
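The scaler macros moved beside their registers use two pick levels: _ID() (an alias for _PICK_EVEN()) selects scaler 1 vs 2 within a pipe, then _MMIO_PIPE() selects the pipe bank. A hypothetical helper demonstrating how one instance resolves:

static u32 example_ps_ctrl_offset(void)
{
	/* pipe B bank, second scaler: resolves to _PS_2B_CTRL (0x68A80) */
	return i915_mmio_reg_offset(SKL_PS_CTRL(PIPE_B, 1));
}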
*/ @@ -2506,9 +2505,7 @@ #define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */ #define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */ #define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */ -#define XELPD_PIPE_SOFT_UNDERRUN REG_BIT(22) /* adl/dg2+ */ #define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */ -#define XELPD_PIPE_HARD_UNDERRUN REG_BIT(21) /* adl/dg2+ */ #define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */ #define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */ #define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */ @@ -2540,6 +2537,10 @@ #define GEN8_PIPE_VSYNC REG_BIT(1) #define GEN8_PIPE_VBLANK REG_BIT(0) +#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \ + GEN8_DE_PIPE_IER(pipe), \ + GEN8_DE_PIPE_IIR(pipe)) + #define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A) #define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1) @@ -2575,6 +2576,10 @@ #define TGL_DE_PORT_AUX_DDIB REG_BIT(1) #define TGL_DE_PORT_AUX_DDIA REG_BIT(0) +#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \ + GEN8_DE_PORT_IER, \ + GEN8_DE_PORT_IIR) + #define GEN8_DE_MISC_ISR _MMIO(0x44460) #define GEN8_DE_MISC_IMR _MMIO(0x44464) #define GEN8_DE_MISC_IIR _MMIO(0x44468) @@ -2584,18 +2589,31 @@ #define GEN8_DE_MISC_GSE REG_BIT(27) #define GEN8_DE_EDP_PSR REG_BIT(19) #define XELPDP_PMDEMAND_RSP REG_BIT(3) +#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1) + +#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \ + GEN8_DE_MISC_IER, \ + GEN8_DE_MISC_IIR) #define GEN8_PCU_ISR _MMIO(0x444e0) #define GEN8_PCU_IMR _MMIO(0x444e4) #define GEN8_PCU_IIR _MMIO(0x444e8) #define GEN8_PCU_IER _MMIO(0x444ec) +#define GEN8_PCU_IRQ_REGS I915_IRQ_REGS(GEN8_PCU_IMR, \ + GEN8_PCU_IER, \ + GEN8_PCU_IIR) + #define GEN11_GU_MISC_ISR _MMIO(0x444f0) #define GEN11_GU_MISC_IMR _MMIO(0x444f4) #define GEN11_GU_MISC_IIR _MMIO(0x444f8) #define GEN11_GU_MISC_IER _MMIO(0x444fc) #define GEN11_GU_MISC_GSE (1 << 27) +#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \ + GEN11_GU_MISC_IER, \ + GEN11_GU_MISC_IIR) + #define GEN11_GFX_MSTR_IRQ _MMIO(0x190010) #define GEN11_MASTER_IRQ (1 << 31) #define GEN11_PCU_IRQ (1 << 30) @@ -2639,6 +2657,10 @@ GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \ GEN11_TBT_HOTPLUG(HPD_PORT_TC1)) +#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \ + GEN11_DE_HPD_IER, \ + GEN11_DE_HPD_IIR) + #define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030) #define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038) #define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4)) @@ -2659,6 +2681,10 @@ #define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin)) #define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0) +#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \ + PICAINTERRUPT_IER, \ + PICAINTERRUPT_IIR) + #define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200)) #define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6) #define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5) @@ -2671,6 +2697,7 @@ #define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16) #define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12) #define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8) +#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4) #define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6) #define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4) #define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0) @@ -2869,6 +2896,7 @@ #define SKL_DFSM_PIPE_C_DISABLE (1 << 28) #define TGL_DFSM_PIPE_D_DISABLE (1 << 22) #define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7) +#define 
XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3) #define XE2LPD_DE_CAP _MMIO(0x41100) #define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30) @@ -3015,6 +3043,10 @@ #define SDEIIR _MMIO(0xc4008) #define SDEIER _MMIO(0xc400c) +#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \ + SDEIER, \ + SDEIIR) + #define SERR_INT _MMIO(0xc4040) #define SERR_INT_POISON (1 << 31) #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3)) @@ -3098,11 +3130,12 @@ #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) #define _PCH_FPA0 0xc6040 +#define _PCH_FPB0 0xc6048 +#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0) #define FP_CB_TUNE (0x3 << 22) + #define _PCH_FPA1 0xc6044 -#define _PCH_FPB0 0xc6048 #define _PCH_FPB1 0xc604c -#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0) #define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1) #define PCH_DPLL_TEST _MMIO(0xc606c) @@ -3155,50 +3188,93 @@ /* transcoder */ #define _PCH_TRANS_HTOTAL_A 0xe0000 +#define _PCH_TRANS_HTOTAL_B 0xe1000 +#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) #define TRANS_HTOTAL_SHIFT 16 #define TRANS_HACTIVE_SHIFT 0 + #define _PCH_TRANS_HBLANK_A 0xe0004 +#define _PCH_TRANS_HBLANK_B 0xe1004 +#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) #define TRANS_HBLANK_END_SHIFT 16 #define TRANS_HBLANK_START_SHIFT 0 + #define _PCH_TRANS_HSYNC_A 0xe0008 +#define _PCH_TRANS_HSYNC_B 0xe1008 +#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) #define TRANS_HSYNC_END_SHIFT 16 #define TRANS_HSYNC_START_SHIFT 0 + #define _PCH_TRANS_VTOTAL_A 0xe000c +#define _PCH_TRANS_VTOTAL_B 0xe100c +#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) #define TRANS_VTOTAL_SHIFT 16 #define TRANS_VACTIVE_SHIFT 0 + #define _PCH_TRANS_VBLANK_A 0xe0010 +#define _PCH_TRANS_VBLANK_B 0xe1010 +#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) #define TRANS_VBLANK_END_SHIFT 16 #define TRANS_VBLANK_START_SHIFT 0 + #define _PCH_TRANS_VSYNC_A 0xe0014 +#define _PCH_TRANS_VSYNC_B 0xe1014 +#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) #define TRANS_VSYNC_END_SHIFT 16 #define TRANS_VSYNC_START_SHIFT 0 + #define _PCH_TRANS_VSYNCSHIFT_A 0xe0028 +#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 +#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B) #define _PCH_TRANSA_DATA_M1 0xe0030 +#define _PCH_TRANSB_DATA_M1 0xe1030 +#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) + #define _PCH_TRANSA_DATA_N1 0xe0034 +#define _PCH_TRANSB_DATA_N1 0xe1034 +#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) + #define _PCH_TRANSA_DATA_M2 0xe0038 +#define _PCH_TRANSB_DATA_M2 0xe1038 +#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) + #define _PCH_TRANSA_DATA_N2 0xe003c +#define _PCH_TRANSB_DATA_N2 0xe103c +#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) + #define _PCH_TRANSA_LINK_M1 0xe0040 +#define _PCH_TRANSB_LINK_M1 0xe1040 +#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) + #define _PCH_TRANSA_LINK_N1 0xe0044 +#define _PCH_TRANSB_LINK_N1 0xe1044 +#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) + #define _PCH_TRANSA_LINK_M2 0xe0048 +#define 
_PCH_TRANSB_LINK_M2 0xe1048 +#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) + #define _PCH_TRANSA_LINK_N2 0xe004c +#define _PCH_TRANSB_LINK_N2 0xe104c +#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) /* Per-transcoder DIP controls (PCH) */ #define _VIDEO_DIP_CTL_A 0xe0200 +#define _VIDEO_DIP_CTL_B 0xe1200 +#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) + #define _VIDEO_DIP_DATA_A 0xe0208 +#define _VIDEO_DIP_DATA_B 0xe1208 +#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) + #define _VIDEO_DIP_GCP_A 0xe0210 +#define _VIDEO_DIP_GCP_B 0xe1210 +#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) #define GCP_COLOR_INDICATION (1 << 2) #define GCP_DEFAULT_PHASE_ENABLE (1 << 1) #define GCP_AV_MUTE (1 << 0) -#define _VIDEO_DIP_CTL_B 0xe1200 -#define _VIDEO_DIP_DATA_B 0xe1208 -#define _VIDEO_DIP_GCP_B 0xe1210 - -#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) -#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) -#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) - /* Per-transcoder DIP controls (VLV) */ #define _VLV_VIDEO_DIP_CTL_A 0x60200 #define _VLV_VIDEO_DIP_CTL_B 0x61170 @@ -3225,36 +3301,54 @@ _CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ - #define _HSW_VIDEO_DIP_CTL_A 0x60200 -#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220 -#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260 -#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 -#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 -#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 -#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484 -#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 -#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 -#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 -#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 -#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300 -#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344 -#define _HSW_VIDEO_DIP_GCP_A 0x60210 - #define _HSW_VIDEO_DIP_CTL_B 0x61200 +#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A) + +#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220 #define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220 +#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260 #define _HSW_VIDEO_DIP_VS_DATA_B 0x61260 +#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 #define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 +#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 #define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 +#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 #define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) + +/*ADLP and later: */ +#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484 #define _ADL_VIDEO_DIP_AS_DATA_B 0x61484 +#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\ + _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4) + +#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 #define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440 +#define 
GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) + +#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 #define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240 +#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 #define _HSW_VIDEO_DIP_VS_ECC_B 0x61280 +#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 #define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 +#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300 #define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300 +#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344 #define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344 + +#define _HSW_VIDEO_DIP_GCP_A 0x60210 #define _HSW_VIDEO_DIP_GCP_B 0x61210 +#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A) /* Icelake PPS_DATA and _ECC DIP Registers. * These are available for transcoders B,C and eDP. @@ -3264,62 +3358,16 @@ #define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350 #define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350 +#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) + #define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4 #define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 - -#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A) -#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A) -#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) -#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) -#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) -/*ADLP and later: */ -#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\ - _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4) #define _HSW_STEREO_3D_CTL_A 0x70020 -#define S3D_ENABLE (1 << 31) #define _HSW_STEREO_3D_CTL_B 0x71020 - #define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A) - -#define _PCH_TRANS_HTOTAL_B 0xe1000 -#define _PCH_TRANS_HBLANK_B 0xe1004 -#define _PCH_TRANS_HSYNC_B 0xe1008 -#define _PCH_TRANS_VTOTAL_B 0xe100c -#define _PCH_TRANS_VBLANK_B 0xe1010 -#define _PCH_TRANS_VSYNC_B 0xe1014 -#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028 - -#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B) -#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B) -#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B) -#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B) -#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B) -#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B) -#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B) - -#define _PCH_TRANSB_DATA_M1 0xe1030 
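Each DIP data accessor above strides one dword per index (the "+ (i) * 4" term). A sketch of the usual write loop, assuming a payload already packed as dwords; the helper name is illustrative:

static void example_write_avi_infoframe(struct drm_i915_private *i915,
					enum transcoder trans,
					const u32 *data, int len)
{
	int i;

	/* len is in bytes; each register slot holds one dword */
	for (i = 0; i < len; i += 4)
		intel_uncore_write(&i915->uncore,
				   HSW_TVIDEO_DIP_AVI_DATA(i915, trans, i >> 2),
				   *data++);
}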
-#define _PCH_TRANSB_DATA_N1 0xe1034 -#define _PCH_TRANSB_DATA_M2 0xe1038 -#define _PCH_TRANSB_DATA_N2 0xe103c -#define _PCH_TRANSB_LINK_M1 0xe1040 -#define _PCH_TRANSB_LINK_N1 0xe1044 -#define _PCH_TRANSB_LINK_M2 0xe1048 -#define _PCH_TRANSB_LINK_N2 0xe104c - -#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1) -#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1) -#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2) -#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2) -#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1) -#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1) -#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2) -#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2) +#define S3D_ENABLE (1 << 31) #define _PCH_TRANSACONF 0xf0008 #define _PCH_TRANSBCONF 0xf1008 @@ -4125,6 +4173,7 @@ enum skl_power_gate { #define _DPLL1_CFGCR1 0x6C040 #define _DPLL2_CFGCR1 0x6C048 #define _DPLL3_CFGCR1 0x6C050 +#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) #define DPLL_CFGCR1_FREQ_ENABLE (1 << 31) #define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9) #define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9) @@ -4133,6 +4182,7 @@ enum skl_power_gate { #define _DPLL1_CFGCR2 0x6C044 #define _DPLL2_CFGCR2 0x6C04C #define _DPLL3_CFGCR2 0x6C054 +#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) #define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8) #define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8) #define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7) @@ -4151,9 +4201,6 @@ enum skl_power_gate { #define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) -#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) -#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) - /* ICL Clocks */ #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5)) @@ -4246,7 +4293,6 @@ enum skl_power_gate { /* ADL-P Type C PLL */ #define PORTTC1_PLL_ENABLE 0x46038 #define PORTTC2_PLL_ENABLE 0x46040 - #define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \ PORTTC1_PLL_ENABLE, \ PORTTC2_PLL_ENABLE) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index a685db1e815d..e251bcc0c89f 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -284,4 +284,14 @@ typedef struct { #define i915_mmio_reg_equal(a, b) (i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b)) #define i915_mmio_reg_valid(r) (!i915_mmio_reg_equal(r, INVALID_MMIO_REG)) +/* A triplet for IMR/IER/IIR registers. 
*/ +struct i915_irq_regs { + i915_reg_t imr; + i915_reg_t ier; + i915_reg_t iir; +}; + +#define I915_IRQ_REGS(_imr, _ier, _iir) \ + ((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) }) + #endif /* __I915_REG_DEFS__ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 519e096c607c..8f62cfa23fb7 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -273,11 +273,6 @@ i915_request_active_engine(struct i915_request *rq, return ret; } -static void __rq_init_watchdog(struct i915_request *rq) -{ - rq->watchdog.timer.function = NULL; -} - static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer) { struct i915_request *rq = @@ -294,6 +289,14 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer) return HRTIMER_NORESTART; } +static void __rq_init_watchdog(struct i915_request *rq) +{ + struct i915_request_watchdog *wdg = &rq->watchdog; + + hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + wdg->timer.function = __rq_watchdog_expired; +} + static void __rq_arm_watchdog(struct i915_request *rq) { struct i915_request_watchdog *wdg = &rq->watchdog; @@ -304,8 +307,6 @@ static void __rq_arm_watchdog(struct i915_request *rq) i915_request_get(rq); - hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - wdg->timer.function = __rq_watchdog_expired; hrtimer_start_range_ns(&wdg->timer, ns_to_ktime(ce->watchdog.timeout_us * NSEC_PER_USEC), @@ -317,7 +318,7 @@ static void __rq_cancel_watchdog(struct i915_request *rq) { struct i915_request_watchdog *wdg = &rq->watchdog; - if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0) + if (hrtimer_try_to_cancel(&wdg->timer) > 0) i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f8373a461f17..f18f1acf2158 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -118,6 +118,7 @@ void i915_save_display(struct drm_i915_private *dev_priv) void i915_restore_display(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); if (!HAS_DISPLAY(dev_priv)) @@ -134,7 +135,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, DSPARB(dev_priv), dev_priv->regfile.saveDSPARB); - intel_vga_redisable(dev_priv); + intel_vga_redisable(display); - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(display); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index ce1cbee1b39d..09d89bdf82f4 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -322,7 +322,7 @@ DEFINE_EVENT(i915_request, i915_request_add, TP_ARGS(rq) ); -#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) +#if IS_ENABLED(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) DEFINE_EVENT(i915_request, i915_request_guc_submit, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 71bdc89bd621..609214231ffc 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -270,7 +270,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. 
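Because I915_IRQ_REGS() expands to a compound literal, each *_IRQ_REGS bank is a self-contained constant that call sites pass by value. A hypothetical helper wiring the PCU bank through the two functions declared in i915_irq.h above:

static void example_toggle_pcu_bank(struct intel_uncore *uncore, u32 mask)
{
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
	gen2_irq_init(uncore, GEN8_PCU_IRQ_REGS, ~mask, mask);
}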
*/ -#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index d2f064d2525c..776f8cc51b2f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -2157,7 +2157,7 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma) int i915_vma_unbind(struct i915_vma *vma) { struct i915_address_space *vm = vma->vm; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; int err; assert_object_held_shared(vma->obj); @@ -2196,7 +2196,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm) { struct drm_i915_gem_object *obj = vma->obj; struct i915_address_space *vm = vma->vm; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; struct dma_fence *fence; int err; diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index 26c4dbda076e..f76642886569 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -502,7 +502,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915) CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - if (IS_IVB_GT1(i915)) + if (INTEL_INFO(i915)->gt == 1) intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); else { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 467999249b9a..856b30fa37dc 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -25,7 +25,7 @@ #include <linux/string_helpers.h> #include <drm/drm_print.h> -#include <drm/intel/i915_pciids.h> +#include <drm/intel/pciids.h> #include "gt/intel_gt_regs.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index a9fcaf33df9e..ef84eea9ba0b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -140,7 +140,6 @@ enum intel_ppgtt_type { #define DEV_INFO_FOR_EACH_FLAG(func) \ func(is_mobile); \ - func(is_lp); \ func(require_force_probe); \ func(is_dgfx); \ /* Keep has_* in alphabetical order */ \ diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h index 73900c098d59..dc2477179c3e 100644 --- a/drivers/gpu/drm/i915/intel_mchbar_regs.h +++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h @@ -207,6 +207,10 @@ #define PCU_PACKAGE_ENERGY_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x593c) #define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948) + +#define PCU_PACKAGE_TEMPERATURE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5978) +#define TEMP_MASK REG_GENMASK(7, 0) + #define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) #define RP0_CAP_MASK REG_GENMASK(7, 0) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2d0647aca964..1a47ecfd3fd8 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -66,7 +66,7 @@ static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { if (!rpm->available || rpm->no_wakeref_tracking) - return -1; + return 
INTEL_WAKEREF_DEF; return intel_ref_tracker_alloc(&rpm->debug); } @@ -114,7 +114,7 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { - return -1; + return INTEL_WAKEREF_DEF; } static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, @@ -250,7 +250,7 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm pm_runtime_get_if_active(rpm->kdev) <= 0) || (!ignore_usecount && pm_runtime_get_if_in_use(rpm->kdev) <= 0)) - return 0; + return NULL; } intel_runtime_pm_acquire(rpm, true); @@ -336,7 +336,7 @@ intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref) */ void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm) { - __intel_runtime_pm_put(rpm, -1, true); + __intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index de3579d399e1..e22669d61e95 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -42,7 +42,6 @@ struct intel_runtime_pm { atomic_t wakeref_count; struct device *kdev; /* points to i915->drm.dev */ bool available; - bool irqs_enabled; bool no_wakeref_tracking; /* @@ -97,10 +96,16 @@ intel_rpm_wakelock_count(int wakeref_count) return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT; } +static inline bool +intel_runtime_pm_suspended(struct intel_runtime_pm *rpm) +{ + return pm_runtime_suspended(rpm->kdev); +} + static inline void assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm) { - WARN_ONCE(pm_runtime_suspended(rpm->kdev), + WARN_ONCE(intel_runtime_pm_suspended(rpm), "Device suspended during HW access\n"); } @@ -189,15 +194,15 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); #define with_intel_runtime_pm(rpm, wf) \ for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #define with_intel_runtime_pm_if_in_use(rpm, wf) \ for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #define with_intel_runtime_pm_if_active(rpm, wf) \ for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index dea2f63184f8..87f246047312 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -27,11 +27,11 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf) if (!atomic_read(&wf->count)) { INTEL_WAKEREF_BUG_ON(wf->wakeref); wf->wakeref = wakeref; - wakeref = 0; + wakeref = NULL; ret = wf->ops->get(wf); if (ret) { - wakeref = xchg(&wf->wakeref, 0); + wakeref = xchg(&wf->wakeref, NULL); wake_up_var(&wf->wakeref); goto unlock; } @@ -52,7 +52,7 @@ unlock: static void ____intel_wakeref_put_last(struct intel_wakeref *wf) { - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref = NULL; INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_dec_and_test(&wf->count))) @@ -61,7 +61,7 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf) /* ops->put() must 
reschedule its own release on error/deferral */ if (likely(!wf->ops->put(wf))) { INTEL_WAKEREF_BUG_ON(!wf->wakeref); - wakeref = xchg(&wf->wakeref, 0); + wakeref = xchg(&wf->wakeref, NULL); wake_up_var(&wf->wakeref); } @@ -107,7 +107,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf, __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex); atomic_set(&wf->count, 0); - wf->wakeref = 0; + wf->wakeref = NULL; INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work); lockdep_init_map(&wf->work.work.lockdep_map, @@ -142,7 +142,7 @@ static void wakeref_auto_timeout(struct timer_list *t) if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags)) return; - wakeref = fetch_and_zero(&wf->wakeref); + wakeref = xchg(&wf->wakeref, NULL); spin_unlock_irqrestore(&wf->lock, flags); intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); @@ -154,7 +154,7 @@ void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, spin_lock_init(&wf->lock); timer_setup(&wf->timer, wakeref_auto_timeout, 0); refcount_set(&wf->count, 0); - wf->wakeref = 0; + wf->wakeref = NULL; wf->i915 = i915; } diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 68aa3be48251..48836ef52d40 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -21,7 +21,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> -typedef unsigned long intel_wakeref_t; +typedef struct ref_tracker *intel_wakeref_t; #define INTEL_REFTRACK_DEAD_COUNT 16 #define INTEL_REFTRACK_PRINT_LIMIT 16 @@ -273,7 +273,7 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf) */ int intel_wakeref_wait_for_idle(struct intel_wakeref *wf); -#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1)) +#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT) static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir) { @@ -281,17 +281,19 @@ static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *di ref_tracker_alloc(dir, &user, GFP_NOWAIT); - return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF; + return user ?: INTEL_WAKEREF_DEF; } static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir, - intel_wakeref_t handle) + intel_wakeref_t wakeref) { - struct ref_tracker *user; + if (wakeref == INTEL_WAKEREF_DEF) + wakeref = NULL; - user = (handle == INTEL_WAKEREF_DEF) ? 
NULL : (void *)handle; + if (WARN_ON(IS_ERR(wakeref))) + return; - ref_tracker_free(dir, &user); + ref_tracker_free(dir, &wakeref); } void intel_ref_tracker_show(struct ref_tracker_dir *dir, @@ -314,7 +316,7 @@ static inline void intel_wakeref_untrack(struct intel_wakeref *wf, static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) { - return -1; + return INTEL_WAKEREF_DEF; } static inline void intel_wakeref_untrack(struct intel_wakeref *wf, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 75278e78ca90..9cf169665d7c 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -170,7 +170,7 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9 static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915) { - if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp) + if (!HAS_PXP(i915)) return NULL; /* @@ -461,9 +461,11 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp) } int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *_obj, bool assign) { + struct drm_i915_gem_object *obj = to_intel_bo(_obj); + if (!intel_pxp_is_active(pxp)) return -ENODEV; @@ -529,7 +531,7 @@ void intel_pxp_invalidate(struct intel_pxp *pxp) if (ctx->pxp_wakeref) { intel_runtime_pm_put(&i915->runtime_pm, ctx->pxp_wakeref); - ctx->pxp_wakeref = 0; + ctx->pxp_wakeref = NULL; } spin_lock_irq(&i915->gem.contexts.lock); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index d9372f6f7797..4ed97db5e7c6 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -9,7 +9,7 @@ #include <linux/errno.h> #include <linux/types.h> -struct drm_i915_gem_object; +struct drm_gem_object; struct drm_i915_private; struct intel_pxp; @@ -32,7 +32,7 @@ int intel_pxp_start(struct intel_pxp *pxp); void intel_pxp_end(struct intel_pxp *pxp); int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, bool assign); void intel_pxp_invalidate(struct intel_pxp *pxp); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 61da4ed9d521..0727492576be 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -4,7 +4,7 @@ * Copyright © 2018 Intel Corporation */ -#include <linux/random.h> +#include <linux/prandom.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 05364eca20f7..70330a2e80f2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -26,7 +26,7 @@ #define __I915_SELFTESTS_RANDOM_H__ #include <linux/math64.h> -#include <linux/random.h> +#include <linux/prandom.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 91794ca17a58..ae57eb03dfca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -137,7 +137,7 @@ static const struct intel_device_info mock_info = { struct drm_i915_private *mock_gem_device(void) { -#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) +#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU) static struct dev_iommu 
fake_iommu = { .priv = (void *)-1 }; #endif struct drm_i915_private *i915; @@ -153,7 +153,7 @@ struct drm_i915_private *mock_gem_device(void) dev_set_name(&pdev->dev, "mock"); dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); -#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) +#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU) /* HACK to disable iommu for the fake device; force identity mapping */ pdev->dev.iommu = &fake_iommu; #endif @@ -203,7 +203,7 @@ struct drm_i915_private *mock_gem_device(void) intel_root_gt_init_early(i915); mock_uncore_init(&i915->uncore, i915); atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */ - to_gt(i915)->awake = -ENODEV; + to_gt(i915)->awake = INTEL_WAKEREF_MOCK_GT; mock_gt_probe(i915); ret = intel_region_ttm_device_init(i915); diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c index 805c4bfb85fe..7e59591bbed6 100644 --- a/drivers/gpu/drm/i915/selftests/scatterlist.c +++ b/drivers/gpu/drm/i915/selftests/scatterlist.c @@ -22,7 +22,7 @@ */ #include <linux/prime_numbers.h> -#include <linux/random.h> +#include <linux/prandom.h> #include "i915_selftest.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 4aba47bccc63..9e310f4099f4 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -714,7 +714,7 @@ void intel_dram_detect(struct drm_i915_private *i915) * Assume level 0 watermark latency adjustment is needed until proven * otherwise, this w/a is not needed by bxt/glk. */ - dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915); + dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915); if (DISPLAY_VER(i915) >= 14) ret = xelpdp_get_dram_info(i915); @@ -722,7 +722,7 @@ void intel_dram_detect(struct drm_i915_private *i915) ret = gen12_get_dram_info(i915); else if (GRAPHICS_VER(i915) >= 11) ret = gen11_get_dram_info(i915); - else if (IS_GEN9_LP(i915)) + else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) ret = bxt_get_dram_info(i915); else ret = skl_get_dram_info(i915); diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c index 542eea50093c..842db43e46c0 100644 --- a/drivers/gpu/drm/i915/soc/intel_pch.c +++ b/drivers/gpu/drm/i915/soc/intel_pch.c @@ -124,7 +124,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) && !IS_ROCKETLAKE(dev_priv) && - !IS_GEN9_BC(dev_priv)); + !IS_SKYLAKE(dev_priv) && + !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv) && + !IS_COMETLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c new file mode 100644 index 000000000000..243d98cab8c3 --- /dev/null +++ b/drivers/gpu/drm/i915/soc/intel_rom.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" + +#include "intel_rom.h" +#include "intel_uncore.h" + +struct intel_rom { + /* for PCI ROM */ + struct pci_dev *pdev; + void __iomem *oprom; + + /* for SPI */ + struct intel_uncore *uncore; + loff_t offset; + + size_t size; + + u32 (*read32)(struct intel_rom *rom, loff_t offset); + u16 (*read16)(struct intel_rom *rom, loff_t offset); + 
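	/*
	 * Optional backend hooks: intel_rom_read_block() falls back to a
	 * read32() loop when read_block is NULL, and intel_rom_free() only
	 * calls free when set. The SPI backend provides neither; the PCI
	 * backend provides both, since it must unmap the ROM.
	 */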
void (*read_block)(struct intel_rom *rom, void *data, loff_t offset, size_t size); + void (*free)(struct intel_rom *rom); +}; + +static u32 spi_read32(struct intel_rom *rom, loff_t offset) +{ + intel_uncore_write(rom->uncore, PRIMARY_SPI_ADDRESS, + rom->offset + offset); + + return intel_uncore_read(rom->uncore, PRIMARY_SPI_TRIGGER); +} + +static u16 spi_read16(struct intel_rom *rom, loff_t offset) +{ + return spi_read32(rom, offset) & 0xffff; +} + +struct intel_rom *intel_rom_spi(struct drm_i915_private *i915) +{ + struct intel_rom *rom; + u32 static_region; + + rom = kzalloc(sizeof(*rom), GFP_KERNEL); + if (!rom) + return NULL; + + rom->uncore = &i915->uncore; + + static_region = intel_uncore_read(rom->uncore, SPI_STATIC_REGIONS); + static_region &= OPTIONROM_SPI_REGIONID_MASK; + intel_uncore_write(rom->uncore, PRIMARY_SPI_REGIONID, static_region); + + rom->offset = intel_uncore_read(rom->uncore, OROM_OFFSET) & OROM_OFFSET_MASK; + + rom->size = 0x200000; + + rom->read32 = spi_read32; + rom->read16 = spi_read16; + + return rom; +} + +static u32 pci_read32(struct intel_rom *rom, loff_t offset) +{ + return ioread32(rom->oprom + offset); +} + +static u16 pci_read16(struct intel_rom *rom, loff_t offset) +{ + return ioread16(rom->oprom + offset); +} + +static void pci_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size) +{ + memcpy_fromio(data, rom->oprom + offset, size); +} + +static void pci_free(struct intel_rom *rom) +{ + pci_unmap_rom(rom->pdev, rom->oprom); +} + +struct intel_rom *intel_rom_pci(struct drm_i915_private *i915) +{ + struct intel_rom *rom; + + rom = kzalloc(sizeof(*rom), GFP_KERNEL); + if (!rom) + return NULL; + + rom->pdev = to_pci_dev(i915->drm.dev); + + rom->oprom = pci_map_rom(rom->pdev, &rom->size); + if (!rom->oprom) { + kfree(rom); + return NULL; + } + + rom->read32 = pci_read32; + rom->read16 = pci_read16; + rom->read_block = pci_read_block; + rom->free = pci_free; + + return rom; +} + +u32 intel_rom_read32(struct intel_rom *rom, loff_t offset) +{ + return rom->read32(rom, offset); +} + +u16 intel_rom_read16(struct intel_rom *rom, loff_t offset) +{ + return rom->read16(rom, offset); +} + +void intel_rom_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size) +{ + u32 *ptr = data; + loff_t index; + + if (rom->read_block) { + rom->read_block(rom, data, offset, size); + return; + } + + for (index = 0; index < size; index += 4) + *ptr++ = rom->read32(rom, offset + index); +} + +loff_t intel_rom_find(struct intel_rom *rom, u32 needle) +{ + loff_t offset; + + for (offset = 0; offset < rom->size; offset += 4) { + if (rom->read32(rom, offset) == needle) + return offset; + } + + return -ENOENT; +} + +size_t intel_rom_size(struct intel_rom *rom) +{ + return rom->size; +} + +void intel_rom_free(struct intel_rom *rom) +{ + if (rom && rom->free) + rom->free(rom); + + kfree(rom); +} diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h new file mode 100644 index 000000000000..fb2979c8ef7f --- /dev/null +++ b/drivers/gpu/drm/i915/soc/intel_rom.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __INTEL_ROM_H__ +#define __INTEL_ROM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_rom; + +struct intel_rom *intel_rom_spi(struct drm_i915_private *i915); +struct intel_rom *intel_rom_pci(struct drm_i915_private *i915); + +u32 intel_rom_read32(struct intel_rom *rom, loff_t offset); +u16 intel_rom_read16(struct intel_rom *rom, 
loff_t offset); +void intel_rom_read_block(struct intel_rom *rom, void *data, + loff_t offset, size_t size); +loff_t intel_rom_find(struct intel_rom *rom, u32 needle); +size_t intel_rom_size(struct intel_rom *rom); +void intel_rom_free(struct intel_rom *rom); + +#endif /* __INTEL_ROM_H__ */ diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 4deeac7ed40a..2bbdc05a3b97 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -321,7 +321,7 @@ static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev) bool reserved = false; u32 retries = 0; - while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT || + while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) || retries < RESERVE_SLOT_MIN_RETRIES) { reserved = pvr_kccb_try_reserve_slot(pvr_dev); if (reserved) diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index 4cb3494c0bb2..5edc3c01af72 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -73,24 +73,12 @@ process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream void *stream; int err; - stream = kzalloc(stream_size, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) { - err = -EFAULT; - goto err_free; - } + stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest); - if (err) - goto err_free; - - kfree(stream); - - return 0; -err_free: kfree(stream); return err; diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index fb17196e05f4..36c0e768698e 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -221,7 +221,7 @@ err_drm_dev_exit: return ret; } -static __always_inline u64 +static __always_inline __maybe_unused u64 pvr_fw_version_packed(u32 major, u32 minor) { return ((u64)major << 32) | minor; diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 78c2f3c6dce0..618503a212a7 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -90,20 +90,13 @@ static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, void *stream; int err; - stream = kzalloc(stream_len, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) { - err = -EFAULT; - goto err_free_stream; - } + stream = memdup_user(u64_to_user_ptr(stream_userptr), stream_len); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job); -err_free_stream: kfree(stream); - return err; } diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 20cb46012082..c4f08432882b 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue) } } - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } /** @@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job) } mutex_unlock(&pvr_dev->queues.lock); - drm_sched_start(sched); + drm_sched_start(sched, 0); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git 
a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 7bd6ba4c6e8a..363f885a7098 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -654,9 +654,7 @@ pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle) xa_lock(&pvr_file->vm_ctx_handles); vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); - if (vm_ctx) - kref_get(&vm_ctx->ref_count); - + pvr_vm_context_get(vm_ctx); xa_unlock(&pvr_file->vm_ctx_handles); return vm_ctx; diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 59e3b6a1dff0..e014ed3ae66c 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -1,12 +1,13 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR select DRM_GEM_DMA_HELPER select VIDEOMODE_HELPERS - depends on DRM && ARCH_MXC && ARM64 + depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST) help Choose this if you have a NXP i.MX8MQ based system and want to use the Display Controller Subsystem. This option enables DCSS support. diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c index 31267c00782f..af91e45b5d13 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -206,15 +206,13 @@ int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm) if (crtc->irq < 0) return crtc->irq; - ret = request_irq(crtc->irq, dcss_crtc_irq_handler, - 0, "dcss_drm", crtc); + ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN, + "dcss_drm", crtc); if (ret) { dev_err(dcss->dev, "irq request failed with %d.\n", ret); return ret; } - disable_irq(crtc->irq); - return 0; } diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c index 2968f5d5bd41..6bbfd9aa27ac 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dtg.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c @@ -134,14 +134,12 @@ static int dcss_dtg_irq_config(struct dcss_dtg *dtg, dtg->base_reg + DCSS_DTG_INT_MASK); ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler, - 0, "dcss_ctxld_kick", dtg); + IRQF_NO_AUTOEN, "dcss_ctxld_kick", dtg); if (ret) { dev_err(dtg->dev, "dtg: irq request failed.\n"); return ret; } - disable_irq(dtg->ctxld_kick_irq); - dtg->ctxld_kick_irq_en = false; return 0; diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index d0ea4e97cded..63a335c62296 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -5,7 +5,9 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -28,6 +30,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { static const struct drm_driver dcss_kms_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &dcss_cma_fops, .name = "imx-dcss", .desc = "i.MX8MQ Display Subsystem", @@ -145,7 +148,7 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) if (ret) goto cleanup_crtc; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return kms; diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c index 825728c356ff..32c3f46b21da 
100644 --- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -136,7 +136,7 @@ static int div_q(int A, int B) else temp -= B / 2; - result = (int)(temp / B); + result = div_s64(temp, B); return result; } @@ -239,7 +239,7 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, ll_temp = coef[phase][i]; ll_temp <<= PSC_COEFF_PRECISION; ll_temp += sum >> 1; - ll_temp /= sum; + ll_temp = div_s64(ll_temp, sum); coef[phase][i] = (int)ll_temp; } } diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig index bacf0655ebaf..acaf25089001 100644 --- a/drivers/gpu/drm/imx/ipuv3/Kconfig +++ b/drivers/gpu/drm/imx/ipuv3/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_IMX tristate "DRM Support for Freescale i.MX" + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_DMA_HELPER @@ -11,8 +12,11 @@ config DRM_IMX config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" - select DRM_PANEL depends on DRM_IMX + select DRM_BRIDGE + select DRM_BRIDGE_CONNECTOR + select DRM_IMX_LEGACY_BRIDGE + select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS config DRM_IMX_TVE @@ -26,9 +30,13 @@ config DRM_IMX_TVE config DRM_IMX_LDB tristate "Support for LVDS displays" - depends on DRM_IMX && MFD_SYSCON + depends on DRM_IMX depends on COMMON_CLK - select DRM_PANEL + select MFD_SYSCON + select DRM_BRIDGE + select DRM_BRIDGE_CONNECTOR + select DRM_PANEL_BRIDGE + select DRM_IMX_LEGACY_BRIDGE help Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c index 4cfabcf7375a..ced06bd8eae8 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -34,13 +35,6 @@ module_param(legacyfb_depth, int, 0444); DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops); -void imx_drm_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} -EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); - static int imx_drm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -163,6 +157,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv, static const struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .ioctls = imx_drm_ioctls, .num_ioctls = ARRAY_SIZE(imx_drm_ioctls), .fops = &imx_drm_driver_fops, @@ -249,7 +244,7 @@ static int imx_drm_bind(struct device *dev) if (ret) goto err_poll_fini; - drm_fbdev_dma_setup(drm, legacyfb_depth); + drm_client_setup_with_color_mode(drm, legacyfb_depth); return 0; diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h index e721bebda2bd..0c85bf83ffbf 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm.h +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h @@ -3,14 +3,9 @@ #define _IMX_DRM_H_ struct device_node; -struct drm_crtc; struct drm_connector; struct drm_device; -struct drm_display_mode; struct drm_encoder; -struct drm_framebuffer; -struct drm_plane; -struct platform_device; struct imx_crtc_state { struct drm_crtc_state base; @@ -24,21 
+19,12 @@ static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s) { return container_of(s, struct imx_crtc_state, base); } -int imx_drm_init_drm(struct platform_device *pdev, - int preferred_bpp); -int imx_drm_exit_drm(void); extern struct platform_driver ipu_drm_driver; -void imx_drm_mode_config_init(struct drm_device *drm); - -struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); - int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); -void imx_drm_connector_destroy(struct drm_connector *connector); - int ipu_planes_assign_pre(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 793dfb1a3ed0..ff74018ac5cd 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -19,19 +19,16 @@ #include <linux/regmap.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> -#include <video/of_videomode.h> - #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" @@ -55,7 +52,6 @@ struct imx_ldb_channel; struct imx_ldb_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct imx_ldb_channel *channel; }; @@ -65,25 +61,13 @@ struct imx_ldb; struct imx_ldb_channel { struct imx_ldb *ldb; - /* Defines what is connected to the ldb, only one at a time */ - struct drm_panel *panel; struct drm_bridge *bridge; struct device_node *child; - struct i2c_adapter *ddc; int chno; - const struct drm_edid *drm_edid; - struct drm_display_mode mode; - int mode_valid; u32 bus_format; - u32 bus_flags; }; -static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) -{ - return container_of(c, struct imx_ldb_encoder, connector)->channel; -} - static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) { return container_of(e, struct imx_ldb_encoder, encoder)->channel; @@ -133,38 +117,6 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch, } } -static int imx_ldb_connector_get_modes(struct drm_connector *connector) -{ - struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); - int num_modes; - - num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector); - if (num_modes > 0) - return num_modes; - - if (!imx_ldb_ch->drm_edid && imx_ldb_ch->ddc) { - imx_ldb_ch->drm_edid = drm_edid_read_ddc(connector, - imx_ldb_ch->ddc); - drm_edid_connector_update(connector, imx_ldb_ch->drm_edid); - } - - if (imx_ldb_ch->drm_edid) - num_modes = drm_edid_connector_add_modes(connector); - - if (imx_ldb_ch->mode_valid) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode); - if (!mode) - return -EINVAL; - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { @@ -205,8 +157,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) return; } - drm_panel_prepare(imx_ldb_ch->panel); - if (dual) { 
clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]); clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]); @@ -245,8 +195,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) } regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - - drm_panel_enable(imx_ldb_ch->panel); } static void @@ -323,8 +271,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; - drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; if (imx_ldb_ch == &ldb->channel[1] || dual) @@ -358,8 +304,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) dev_err(ldb->dev, "unable to set di%d parent clock to original parent\n", mux); - - drm_panel_unprepare(imx_ldb_ch->panel); } static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, @@ -374,11 +318,12 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, /* Bus format description in DT overrides connector display info. */ if (!bus_format && di->num_bus_formats) { bus_format = di->bus_formats[0]; - imx_crtc_state->bus_flags = di->bus_flags; } else { bus_format = imx_ldb_ch->bus_format; - imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags; } + + imx_crtc_state->bus_flags = di->bus_flags; + switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; @@ -398,18 +343,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, } -static const struct drm_connector_funcs imx_ldb_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { - .get_modes = imx_ldb_connector_get_modes, -}; - static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, @@ -447,7 +380,6 @@ static int imx_ldb_register(struct drm_device *drm, return PTR_ERR(ldb_encoder); ldb_encoder->channel = imx_ldb_ch; - connector = &ldb_encoder->connector; encoder = &ldb_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); @@ -466,25 +398,16 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); - if (imx_ldb_ch->bridge) { - ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); - if (ret) - return ret; - } else { - /* - * We want to add the connector whenever there is no bridge - * that brings its own, not only when there is a panel. For - * historical reasons, the ldb driver can also work without - * a panel. 
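Both imx-ldb here and parallel-display further down converge on the same registration pattern: attach the bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR, then let drm_bridge_connector_init() synthesize the single connector from whatever the chain reports. Condensed into a standalone sketch:

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>

static int example_register_output(struct drm_device *drm,
				   struct drm_encoder *encoder,
				   struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* No bridge in the chain may create its own connector... */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* ...a single one is derived from the chain instead. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}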
- */ - drm_connector_helper_add(connector, - &imx_ldb_connector_helper_funcs); - drm_connector_init_with_ddc(drm, connector, - &imx_ldb_connector_funcs, - DRM_MODE_CONNECTOR_LVDS, - imx_ldb_ch->ddc); - drm_connector_attach_encoder(connector, encoder); - } + ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -549,47 +472,6 @@ static const struct of_device_id imx_ldb_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids); -static int imx_ldb_panel_ddc(struct device *dev, - struct imx_ldb_channel *channel, struct device_node *child) -{ - struct device_node *ddc_node; - int ret; - - ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); - if (ddc_node) { - channel->ddc = of_find_i2c_adapter_by_node(ddc_node); - of_node_put(ddc_node); - if (!channel->ddc) { - dev_warn(dev, "failed to get ddc i2c adapter\n"); - return -EPROBE_DEFER; - } - } - - if (!channel->ddc) { - const void *edidp; - int edid_len; - - /* if no DDC available, fallback to hardcoded EDID */ - dev_dbg(dev, "no ddc available\n"); - - edidp = of_get_property(child, "edid", &edid_len); - if (edidp) { - channel->drm_edid = drm_edid_alloc(edidp, edid_len); - if (!channel->drm_edid) - return -ENOMEM; - } else if (!channel->panel) { - /* fallback to display-timings node */ - ret = of_get_drm_display_mode(child, - &channel->mode, - &channel->bus_flags, - OF_USE_NATIVE_MODE); - if (!ret) - channel->mode_valid = 1; - } - } - return 0; -} - static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; @@ -694,29 +576,22 @@ static int imx_ldb_probe(struct platform_device *pdev) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - ret = drm_of_find_panel_or_bridge(child, - imx_ldb->lvds_mux ? 4 : 2, 0, - &channel->panel, &channel->bridge); - if (ret && ret != -ENODEV) - goto free_child; - - /* panel ddc only if there is no bridge */ - if (!channel->bridge) { - ret = imx_ldb_panel_ddc(dev, channel, child); - if (ret) + channel->bridge = devm_drm_of_get_bridge(dev, child, + imx_ldb->lvds_mux ? 4 : 2, 0); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + if (ret != -ENODEV) goto free_child; + channel->bridge = NULL; } bus_format = of_get_bus_format(dev, child); - if (bus_format == -EINVAL) { - /* - * If no bus format was specified in the device tree, - * we can still get it from the connected panel later. - */ - if (channel->panel && channel->panel->funcs && - channel->panel->funcs->get_modes) - bus_format = 0; - } + /* + * If no bus format was specified in the device tree, + * we can still get it from the connected panel later. + */ + if (bus_format == -EINVAL && channel->bridge) + bus_format = 0; if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); @@ -724,6 +599,20 @@ static int imx_ldb_probe(struct platform_device *pdev) goto free_child; } channel->bus_format = bus_format; + + /* + * legacy bridge doesn't handle bus_format, so create it after + * checking the bus_format property. 
+ */ + if (!channel->bridge) { + channel->bridge = devm_imx_drm_legacy_bridge(dev, child, + DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + goto free_child; + } + } + channel->child = child; } @@ -738,16 +627,6 @@ free_child: static void imx_ldb_remove(struct platform_device *pdev) { - struct imx_ldb *imx_ldb = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < 2; i++) { - struct imx_ldb_channel *channel = &imx_ldb->channel[i]; - - drm_edid_free(channel->drm_edid); - i2c_put_adapter(channel->ddc); - } - component_del(&pdev->dev, &imx_ldb_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index 29f494bfff67..d46d07d25f51 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -305,9 +305,15 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder, return 0; } +static void imx_tve_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + static const struct drm_connector_funcs imx_tve_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, + .destroy = imx_tve_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index ef29c9a61a46..99db53e167bd 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -410,14 +410,12 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data) } ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); - ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, - "imx_drm", ipu_crtc); + ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, + IRQF_NO_AUTOEN, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); return ret; } - /* Only enable IRQ when we actually need it to trigger work. 
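The dcss and ipuv3 hunks apply one and the same fix: requesting the line with IRQF_NO_AUTOEN replaces the request_irq()/disable_irq() pair and removes the window in which the handler could fire before the driver is ready for it. The minimal equivalent form:

#include <linux/interrupt.h>

static int example_request_idle_irq(struct device *dev, unsigned int irq,
				    irq_handler_t handler, void *priv)
{
	/*
	 * IRQF_NO_AUTOEN leaves the line disabled after registration;
	 * a later enable_irq() arms it once it is actually needed.
	 */
	return devm_request_irq(dev, irq, handler, IRQF_NO_AUTOEN,
				"example", priv);
}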
*/ - disable_irq(ipu_crtc->irq); return 0; } diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 91d7808a2d8d..70f62e89622e 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -12,21 +12,18 @@ #include <linux/platform_device.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> - #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" struct imx_parallel_display_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct drm_bridge bridge; struct imx_parallel_display *pd; @@ -34,79 +31,15 @@ struct imx_parallel_display_encoder { struct imx_parallel_display { struct device *dev; - const struct drm_edid *drm_edid; u32 bus_format; - u32 bus_flags; - struct drm_display_mode mode; - struct drm_panel *panel; struct drm_bridge *next_bridge; }; -static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) -{ - return container_of(c, struct imx_parallel_display_encoder, connector)->pd; -} - static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; } -static int imx_pd_connector_get_modes(struct drm_connector *connector) -{ - struct imx_parallel_display *imxpd = con_to_imxpd(connector); - struct device_node *np = imxpd->dev->of_node; - int num_modes; - - num_modes = drm_panel_get_modes(imxpd->panel, connector); - if (num_modes > 0) - return num_modes; - - if (imxpd->drm_edid) { - drm_edid_connector_update(connector, imxpd->drm_edid); - num_modes = drm_edid_connector_add_modes(connector); - } - - if (np) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - int ret; - - if (!mode) - return 0; - - ret = of_get_drm_display_mode(np, &imxpd->mode, - &imxpd->bus_flags, - OF_USE_NATIVE_MODE); - if (ret) { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - -static void imx_pd_bridge_enable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_prepare(imxpd->panel); - drm_panel_enable(imxpd->panel); -} - -static void imx_pd_bridge_disable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_disable(imxpd->panel); - drm_panel_unprepare(imxpd->panel); -} - static const u32 imx_pd_bus_fmts[] = { MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_BGR888_1X24, @@ -200,7 +133,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, { struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct drm_display_info *di = &conn_state->connector->display_info; - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); struct drm_bridge_state *next_bridge_state = NULL; struct drm_bridge *next_bridge; u32 bus_flags, bus_fmt; @@ -212,10 +144,8 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (next_bridge_state) bus_flags = next_bridge_state->input_bus_cfg.flags; - else if (di->num_bus_formats) - bus_flags = di->bus_flags; 
else - bus_flags = imxpd->bus_flags; + bus_flags = di->bus_flags; bus_fmt = bridge_state->input_bus_cfg.format; if (!imx_pd_format_supported(bus_fmt)) @@ -231,21 +161,16 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static const struct drm_connector_funcs imx_pd_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; +static int imx_pd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); -static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { - .get_modes = imx_pd_connector_get_modes, -}; + return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags); +} static const struct drm_bridge_funcs imx_pd_bridge_funcs = { - .enable = imx_pd_bridge_enable, - .disable = imx_pd_bridge_disable, + .attach = imx_pd_bridge_attach, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -270,7 +195,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(imxpd_encoder); imxpd_encoder->pd = imxpd; - connector = &imxpd_encoder->connector; encoder = &imxpd_encoder->encoder; bridge = &imxpd_encoder->bridge; @@ -278,28 +202,14 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - /* set the connector's dpms to OFF so that - * drm_helper_connector_dpms() won't return - * immediately since the current state is ON - * at this point. 
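parallel-display, just below, adopts the same lookup-with-fallback idiom that imx-ldb used above: probe the OF graph for a real bridge and, only on -ENODEV, substitute the legacy bridge so old device trees without a panel or bridge node keep working. Roughly (devm_imx_drm_legacy_bridge() is the helper this series introduces via <drm/bridge/imx.h>):

#include <drm/drm_bridge.h>
#include <drm/bridge/imx.h>

static struct drm_bridge *example_get_bridge(struct device *dev,
					     struct device_node *np)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, np, 1, 0); /* port@1: output */
	if (bridge == ERR_PTR(-ENODEV))	/* old DT, nothing described */
		bridge = devm_imx_drm_legacy_bridge(dev, np,
						    DRM_MODE_CONNECTOR_DPI);

	return bridge;	/* other errors still propagate as ERR_PTRs */
}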
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - bridge->funcs = &imx_pd_bridge_funcs; - drm_bridge_attach(encoder, bridge, NULL, 0); - - if (imxpd->next_bridge) { - ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0); - if (ret < 0) - return ret; - } else { - drm_connector_helper_add(connector, - &imx_pd_connector_helper_funcs); - drm_connector_init(drm, connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_DPI); - - drm_connector_attach_encoder(connector, encoder); - } + drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -312,9 +222,7 @@ static int imx_pd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const u8 *edidp; struct imx_parallel_display *imxpd; - int edid_len; int ret; u32 bus_format = 0; const char *fmt; @@ -324,14 +232,13 @@ static int imx_pd_probe(struct platform_device *pdev) return -ENOMEM; /* port@1 is the output port */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, - &imxpd->next_bridge); - if (ret && ret != -ENODEV) + imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); + if (imxpd->next_bridge == ERR_PTR(-ENODEV)) + imxpd->next_bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(imxpd->next_bridge)) { + ret = PTR_ERR(imxpd->next_bridge); return ret; - - edidp = of_get_property(np, "edid", &edid_len); - if (edidp) - imxpd->drm_edid = drm_edid_alloc(edidp, edid_len); + } ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { @@ -355,11 +262,7 @@ static int imx_pd_probe(struct platform_device *pdev) static void imx_pd_remove(struct platform_device *pdev) { - struct imx_parallel_display *imxpd = platform_get_drvdata(pdev); - component_del(&pdev->dev, &imx_pd_ops); - - drm_edid_free(imxpd->drm_edid); } static const struct of_device_id imx_pd_dt_ids[] = { diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig index 9c28bb0f4662..75869489b0e6 100644 --- a/drivers/gpu/drm/imx/lcdc/Kconfig +++ b/drivers/gpu/drm/imx/lcdc/Kconfig @@ -1,6 +1,7 @@ config DRM_IMX_LCDC tristate "Freescale i.MX LCDC displays" depends on DRM && (ARCH_MXC || COMPILE_TEST) + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 36668455aee8..3215c4acd675 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -3,6 +3,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -348,6 +349,7 @@ static struct drm_driver imx_lcdc_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &imx_lcdc_drm_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "imx-lcdc", .desc = "i.MX LCDC driver", .date = "20200716", @@ -501,7 +503,7 @@ static int imx_lcdc_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Cannot register device\n"); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 8cd7b750dffe..04ecfb0c5dd6 100644 --- a/drivers/gpu/drm/ingenic/Kconfig 
+++ b/drivers/gpu/drm/ingenic/Kconfig @@ -6,6 +6,7 @@ config DRM_INGENIC depends on OF depends on COMMON_CLK select DRM_BRIDGE + select DRM_CLIENT_SELECTION select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 39fa291f43dd..056b70b63554 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -24,6 +24,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_damage_helper.h> @@ -960,6 +961,7 @@ static const struct drm_driver ingenic_drm_driver_data = { .fops = &ingenic_drm_fops, .gem_create_object = ingenic_drm_gem_create_object, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = { @@ -1399,7 +1401,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) goto err_clk_notifier_unregister; } - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig index e5ae3ec52392..7a2aa892a957 100644 --- a/drivers/gpu/drm/kmb/Kconfig +++ b/drivers/gpu/drm/kmb/Kconfig @@ -2,6 +2,7 @@ config DRM_KMB_DISPLAY tristate "Intel Keembay Display" depends on DRM depends on ARCH_KEEMBAY || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 169b83987ce2..0274ab9caa85 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -14,6 +14,7 @@ #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -441,6 +442,7 @@ static const struct drm_driver kmb_driver = { /* GEM Operations */ .fops = &fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "kmb-drm", .desc = "KEEMBAY DISPLAY DRIVER", .date = DRIVER_DATE, @@ -561,7 +563,7 @@ static int kmb_probe(struct platform_device *pdev) if (ret) goto err_register; - drm_fbdev_dma_setup(&kmb->drm, 0); + drm_client_setup(&kmb->drm, NULL); return 0; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c index cf7cf0b07541..faf38ca9e44c 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.c +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -818,7 +818,7 @@ static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no, } } -static inline void +static inline __maybe_unused void set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi, u32 dphy_no, u32 freq) @@ -830,7 +830,7 @@ static inline void (freq & 0x7f)); } -static inline void +static inline __maybe_unused void set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi, u32 dphy_no, u32 freq) diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h index 5543bf0474bc..9f827260a89d 100644 --- a/drivers/gpu/drm/lib/drm_random.h +++ b/drivers/gpu/drm/lib/drm_random.h @@ -6,7 +6,7 @@ * be transposed to lib/ at the earliest convenience. 
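The <linux/random.h> to <linux/prandom.h> switches in the selftests and in this header track the split of the seedable pseudo-random interface into its own kernel header; behavior is unchanged, as these users only ever needed the reproducible prandom_*_state() API. The pattern the selftests rely on, for reference:

#include <linux/prandom.h>

static u32 example_deterministic_u32(u64 seed)
{
	struct rnd_state prng;

	/* Same seed, same sequence: reproducible across selftest runs. */
	prandom_seed_state(&prng, seed);
	return prandom_u32_state(&prng);
}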
*/ -#include <linux/random.h> +#include <linux/prandom.h> #define DRM_RND_STATE_INITIALIZER(seed__) ({ \ struct rnd_state state__; \ diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 1a944edb6ddc..b40c90e97d7e 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job lima_pm_idle(ldev); drm_sched_resubmit_jobs(&pipe->base); - drm_sched_start(&pipe->base); + drm_sched_start(&pipe->base, 0); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig index 1df22a852a23..579a358ed5cf 100644 --- a/drivers/gpu/drm/logicvc/Kconfig +++ b/drivers/gpu/drm/logicvc/Kconfig @@ -2,6 +2,7 @@ config DRM_LOGICVC tristate "LogiCVC DRM" depends on DRM depends on OF || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_KMS_DMA_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c index 01a37e28c080..e4d90701b29d 100644 --- a/drivers/gpu/drm/logicvc/logicvc_drm.c +++ b/drivers/gpu/drm/logicvc/logicvc_drm.c @@ -16,8 +16,10 @@ #include <linux/types.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_print.h> @@ -55,6 +57,7 @@ static struct drm_driver logicvc_drm_driver = { .minor = 0, DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static struct regmap_config logicvc_drm_regmap_config = { @@ -301,7 +304,6 @@ static int logicvc_drm_probe(struct platform_device *pdev) struct regmap *regmap = NULL; struct resource res; void __iomem *base; - unsigned int preferred_bpp; int irq; int ret; @@ -439,17 +441,7 @@ static int logicvc_drm_probe(struct platform_device *pdev) goto error_mode; } - switch (drm_dev->mode_config.preferred_depth) { - case 16: - preferred_bpp = 16; - break; - case 24: - case 32: - default: - preferred_bpp = 32; - break; - } - drm_fbdev_dma_setup(drm_dev, preferred_bpp); + drm_client_setup(drm_dev, NULL); return 0; diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig index 9ed463a76ae2..552edfec7afb 100644 --- a/drivers/gpu/drm/loongson/Kconfig +++ b/drivers/gpu/drm/loongson/Kconfig @@ -4,6 +4,7 @@ config DRM_LOONGSON tristate "DRM support for Loongson Graphics" depends on DRM && PCI && MMU depends on LOONGARCH || MIPS || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_TTM select DRM_TTM_HELPER diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c index adc7344d2f80..b350bdcf1645 100644 --- a/drivers/gpu/drm/loongson/lsdc_drv.c +++ b/drivers/gpu/drm/loongson/lsdc_drv.c @@ -3,12 +3,13 @@ * Copyright (C) 2023 Loongson Technology Corporation Limited */ +#include <linux/aperture.h> #include <linux/pci.h> #include <linux/vgaarb.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -47,6 +48,7 @@ static const struct drm_driver lsdc_drm_driver = { .dumb_create = lsdc_dumb_create, .dumb_map_offset = lsdc_dumb_map_offset, .gem_prime_import_sg_table = lsdc_prime_import_sg_table, + DRM_FBDEV_TTM_DRIVER_OPS, }; 
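The lsdc conversion above shows the full shape of the fbdev change running through this series: the driver advertises its fbdev client hooks statically (DRM_FBDEV_TTM_DRIVER_OPS here, the DMA variant in most other drivers) and then asks the core to set up whichever in-kernel clients are enabled, rather than hard-coding a drm_fbdev_*_setup() call. A condensed sketch, assuming a DMA-backed driver; names are illustrative:

#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	DRM_FBDEV_DMA_DRIVER_OPS,	/* fbdev hooks come from the driver */
	.name = "example",
	.desc = "DRM client-setup example",
};

static void example_finish_probe(struct drm_device *drm)
{
	/* NULL format: the core picks a default (e.g. from preferred_depth). */
	drm_client_setup(drm, NULL);
}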
static const struct drm_mode_config_funcs lsdc_mode_config_funcs = { @@ -213,9 +215,9 @@ lsdc_create_device(struct pci_dev *pdev, return ERR_PTR(ret); } - ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base, - ldev->vram_size, - driver); + ret = aperture_remove_conflicting_devices(ldev->vram_base, + ldev->vram_size, + driver->name); if (ret) { drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret); return ERR_PTR(ret); @@ -314,7 +316,7 @@ static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - drm_fbdev_ttm_setup(ddev, 32); + drm_client_setup(ddev, NULL); return 0; } diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig index 907460b69d4f..3516c8d2a5d9 100644 --- a/drivers/gpu/drm/mcde/Kconfig +++ b/drivers/gpu/drm/mcde/Kconfig @@ -6,6 +6,7 @@ config DRM_MCDE depends on OF depends on COMMON_CLK select MFD_SYSCON + select DRM_CLIENT_SELECTION select DRM_MIPI_DSI select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 10c06440c7e7..f60bdd7b6c13 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -67,6 +67,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fbdev_dma.h> @@ -212,6 +213,7 @@ static const struct drm_driver mcde_drm_driver = { .minor = 0, .patchlevel = 0, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static int mcde_drm_bind(struct device *dev) @@ -237,7 +239,7 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; @@ -473,6 +475,7 @@ static const struct of_device_id mcde_of_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, mcde_of_match); static struct platform_driver mcde_driver = { .driver = { diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 417ac8c9af41..f496e6cfdfe0 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -2,11 +2,12 @@ config DRM_MEDIATEK tristate "DRM Support for Mediatek SoCs" depends on DRM - depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) + depends on ARCH_MEDIATEK || COMPILE_TEST depends on COMMON_CLK - depends on HAVE_ARM_SMCCC + depends on HAVE_ARM_SMCCC || COMPILE_TEST depends on OF depends on MTK_MMSYS + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04154db9085c..04217a36939c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -109,6 +109,7 @@ size_t mtk_ovl_get_num_formats(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node); void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next); void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index bf2546c4681a..187855d83590 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ 
-497,6 +497,41 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } +static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node, + enum mtk_ovl_adaptor_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ovl_adaptor_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node) +{ + enum mtk_ovl_adaptor_comp_type type; + int ret; + + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) + return false; + + if (type >= OVL_ADAPTOR_TYPE_NUM) + return false; + + /* + * In the context of mediatek-drm, ETHDR, MDP_RDMA and Padding are + * used exclusively by OVL Adaptor: if this component is not one of + * those, it's likely not an OVL Adaptor path. + */ + return type == OVL_ADAPTOR_TYPE_ETHDR || + type == OVL_ADAPTOR_TYPE_MDP_RDMA || + type == OVL_ADAPTOR_TYPE_PADDING; +} + static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) { struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); @@ -506,12 +541,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma parent = dev->parent->parent->of_node->parent; for_each_child_of_node_scoped(parent, node) { - const struct of_device_id *of_id; enum mtk_ovl_adaptor_comp_type type; - int id; + int id, ret; - of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); - if (!of_id) + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -520,7 +554,6 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma continue; } - type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data; id = ovl_adaptor_comp_get_id(dev, node, type); if (id < 0) { dev_warn(dev, "Skipping unknown component %pOF\n", diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index f2bee617f063..1cc916b16471 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -394,7 +394,7 @@ static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = { }, }; -static struct regmap_config mtk_dp_regmap_config = { +static const struct regmap_config mtk_dp_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index a08d20654954..20a9d589fd75 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -704,6 +704,20 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + int ret; + + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); + if (IS_ERR(dpi->next_bridge)) { + ret = PTR_ERR(dpi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); + if (IS_ERR(dpi->next_bridge)) + return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), + "Failed to get bridge\n"); + } return drm_bridge_attach(bridge->encoder, dpi->next_bridge, &dpi->bridge, flags); @@ -1058,13 +1072,6 @@ static int mtk_dpi_probe(struct platform_device *pdev) if (dpi->irq < 0) return dpi->irq; - dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dpi->next_bridge)) - return dev_err_probe(dev, 
PTR_ERR(dpi->next_bridge), - "Failed to get bridge\n"); - - dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node); - platform_set_drvdata(pdev, dpi); dpi->bridge.funcs = &mtk_dpi_bridge_funcs; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 3e807195a0d0..9a8ef8558da9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> @@ -26,6 +27,7 @@ #include "mtk_crtc.h" #include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" #include "mtk_drm_drv.h" #include "mtk_gem.h" @@ -371,12 +373,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev) struct mtk_drm_private *temp_drm_priv; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; - struct device_node *node; struct device *drm_dev; unsigned int cnt = 0; int i, j; - for_each_child_of_node(phandle->parent, node) { + for_each_child_of_node_scoped(phandle->parent, node) { struct platform_device *pdev; of_id = of_match_node(mtk_drm_of_ids, node); @@ -606,6 +607,7 @@ static const struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = mtk_gem_dumb_create, + DRM_FBDEV_DMA_DRIVER_OPS, .gem_prime_import = mtk_gem_prime_import, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, @@ -662,7 +664,7 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; @@ -818,12 +820,235 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { { } }; +static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ddp_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node, + int output_port, enum mtk_crtc_path crtc_path, + struct device_node **next, unsigned int *cid) +{ + struct device_node *ep_dev_node, *ep_out; + enum mtk_ddp_comp_type comp_type; + int ret; + + ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path); + if (!ep_out) + return -ENOENT; + + ep_dev_node = of_graph_get_remote_port_parent(ep_out); + of_node_put(ep_out); + if (!ep_dev_node) + return -EINVAL; + + /* + * Pass the next node pointer regardless of failures in the later code + * so that if this function is called in a loop it will walk through all + * of the subsequent endpoints anyway. + */ + *next = ep_dev_node; + + if (!of_device_is_available(ep_dev_node)) + return -ENODEV; + + ret = mtk_drm_of_get_ddp_comp_type(ep_dev_node, &comp_type); + if (ret) { + if (mtk_ovl_adaptor_is_comp_present(ep_dev_node)) { + *cid = (unsigned int)DDP_COMPONENT_DRM_OVL_ADAPTOR; + return 0; + } + return ret; + } + + ret = mtk_ddp_comp_get_id(ep_dev_node, comp_type); + if (ret < 0) + return ret; + + /* All ok! Pass the Component ID to the caller. 
*/ + *cid = (unsigned int)ret; + + return 0; +} + +/** + * mtk_drm_of_ddp_path_build_one - Build a Display HW Pipeline for a CRTC Path + * @dev: The mediatek-drm device + * @cpath: CRTC Path relative to a VDO or MMSYS + * @out_path: Pointer to an array that will contain the new pipeline + * @out_path_len: Number of entries in the pipeline array + * + * MediaTek SoCs can use different DDP hardware pipelines (or paths) depending + * on the board-specific desired display configuration; this function walks + * through all of the output endpoints starting from a VDO or MMSYS hardware + * instance and builds the right pipeline as specified in device trees. + * + * Return: + * * %0 - Display HW Pipeline successfully built and validated + * * %-ENOENT - Display pipeline was not specified in device tree + * * %-EINVAL - Display pipeline built but validation failed + * * %-ENOMEM - Failure to allocate pipeline array to pass to the caller + */ +static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path cpath, + const unsigned int **out_path, + unsigned int *out_path_len) +{ + struct device_node *next, *prev, *vdo = dev->parent->of_node; + unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 }; + unsigned int *final_ddp_path; + unsigned short int idx = 0; + bool ovl_adaptor_comp_added = false; + int ret; + + /* Get the first entry for the temp_path array */ + ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]); + if (ret) { + if (next && temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_dbg(dev, "Adding OVL Adaptor for %pOF\n", next); + ovl_adaptor_comp_added = true; + } else { + if (next) + dev_err(dev, "Invalid component %pOF\n", next); + else + dev_err(dev, "Cannot find first endpoint for path %d\n", cpath); + + return ret; + } + } + idx++; + + /* + * Walk through port outputs until we reach the last valid mediatek-drm component. + * To be valid, this must end with an "invalid" component that is a display node. + */ + do { + prev = next; + ret = mtk_drm_of_get_ddp_ep_cid(next, 1, cpath, &next, &temp_path[idx]); + of_node_put(prev); + if (ret) { + of_node_put(next); + break; + } + + /* + * If this is an OVL adaptor exclusive component and one of those + * was already added, don't add another instance of the generic + * DDP_COMPONENT_OVL_ADAPTOR, as this is used only to decide whether + * to probe that component master driver of which only one instance + * is needed and possible. + */ + if (temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + if (!ovl_adaptor_comp_added) + ovl_adaptor_comp_added = true; + else + idx--; + } + } while (++idx < DDP_COMPONENT_DRM_ID_MAX); + + /* + * The device component might not be enabled: in that case, don't + * check the last entry and just report that the device is missing. + */ + if (ret == -ENODEV) + return ret; + + /* If the last entry is not a final display output, the configuration is wrong */ + switch (temp_path[idx - 1]) { + case DDP_COMPONENT_DP_INTF0: + case DDP_COMPONENT_DP_INTF1: + case DDP_COMPONENT_DPI0: + case DDP_COMPONENT_DPI1: + case DDP_COMPONENT_DSI0: + case DDP_COMPONENT_DSI1: + case DDP_COMPONENT_DSI2: + case DDP_COMPONENT_DSI3: + break; + default: + dev_err(dev, "Invalid display hw pipeline. Last component: %d (ret=%d)\n", + temp_path[idx - 1], ret); + return -EINVAL; + } + + final_ddp_path = devm_kmemdup(dev, temp_path, idx * sizeof(temp_path[0]), GFP_KERNEL); + if (!final_ddp_path) + return -ENOMEM; + + dev_dbg(dev, "Display HW Pipeline built with %d components.\n", idx); + + /* Pipeline built! 
*/ + *out_path = final_ddp_path; + *out_path_len = idx; + + return 0; +} + +static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node, + struct mtk_mmsys_driver_data *data) +{ + struct device_node *ep_node; + struct of_endpoint of_ep; + bool output_present[MAX_CRTC] = { false }; + int ret; + + for_each_endpoint_of_node(node, ep_node) { + ret = of_graph_parse_endpoint(ep_node, &of_ep); + if (ret) { + dev_err_probe(dev, ret, "Cannot parse endpoint\n"); + break; + } + + if (of_ep.id >= MAX_CRTC) { + ret = dev_err_probe(dev, -EINVAL, + "Invalid endpoint%u number\n", of_ep.port); + break; + } + + output_present[of_ep.id] = true; + } + + if (ret) { + of_node_put(ep_node); + return ret; + } + + if (output_present[CRTC_MAIN]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_MAIN, + &data->main_path, &data->main_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_EXT]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_EXT, + &data->ext_path, &data->ext_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_THIRD]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_THIRD, + &data->third_path, &data->third_len); + if (ret && ret != -ENODEV) + return ret; + } + + return 0; +} + static int mtk_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *phandle = dev->parent->of_node; const struct of_device_id *of_id; struct mtk_drm_private *private; + struct mtk_mmsys_driver_data *mtk_drm_data; struct device_node *node; struct component_match *match = NULL; struct platform_device *ovl_adaptor; @@ -844,7 +1069,27 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; - private->data = of_id->data; + mtk_drm_data = (struct mtk_mmsys_driver_data *)of_id->data; + if (!mtk_drm_data) + return -EINVAL; + + /* Try to build the display pipeline from devicetree graphs */ + if (of_graph_is_present(phandle)) { + dev_dbg(dev, "Building display pipeline for MMSYS %u\n", + mtk_drm_data->mmsys_id); + private->data = devm_kmemdup(dev, mtk_drm_data, + sizeof(*mtk_drm_data), GFP_KERNEL); + if (!private->data) + return -ENOMEM; + + ret = mtk_drm_of_ddp_path_build(dev, phandle, private->data); + if (ret) + return ret; + } else { + /* No devicetree graphs support: go with hardcoded paths if present */ + dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id); + private->data = mtk_drm_data; + }; private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num, sizeof(*private->all_drm_private), @@ -866,12 +1111,11 @@ static int mtk_drm_probe(struct platform_device *pdev) /* Iterate over sibling DISP function blocks */ for_each_child_of_node(phandle->parent, node) { - const struct of_device_id *of_id; enum mtk_ddp_comp_type comp_type; int comp_id; - of_id = of_match_node(mtk_ddp_comp_dt_ids, node); - if (!of_id) + ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type); + if (ret) continue; if (!of_device_is_available(node)) { @@ -880,8 +1124,6 @@ static int mtk_drm_probe(struct platform_device *pdev) continue; } - comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data; - if (comp_type == MTK_DISP_MUTEX) { int id; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index ce897984de51..675cdc90a440 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -63,7 +63,7 @@ struct mtk_drm_private { struct device *mmsys_dev; struct device_node 
*comp_node[DDP_COMPONENT_DRM_ID_MAX]; struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_DRM_ID_MAX]; - const struct mtk_mmsys_driver_data *data; + struct mtk_mmsys_driver_data *data; struct drm_atomic_state *suspend_state; unsigned int mbox_index; struct mtk_drm_private **all_drm_private; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index eeec641cab60..33ceeb8d6925 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -988,9 +988,17 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host, dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; - dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); - if (IS_ERR(dsi->next_bridge)) - return PTR_ERR(dsi->next_bridge); + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + } drm_bridge_add(&dsi->bridge); diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig index 2544756538cc..417f79829cf8 100644 --- a/drivers/gpu/drm/meson/Kconfig +++ b/drivers/gpu/drm/meson/Kconfig @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_MESON tristate "DRM Support for Amlogic Meson Display Controller" - depends on DRM && OF && (ARM || ARM64) + depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on ARCH_MESON || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 4bd0baa2a4f5..7cace75a38af 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -8,6 +8,7 @@ * Jasper St. Pierre <jstpierre@mecheye.net> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/module.h> #include <linux/of_graph.h> @@ -15,8 +16,8 @@ #include <linux/platform_device.h> #include <linux/soc/amlogic/meson-canvas.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -98,6 +99,7 @@ static const struct drm_driver meson_driver = { /* DMA Ops */ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, /* Misc */ .fops = &fops, @@ -126,7 +128,7 @@ static bool meson_vpu_has_available_connectors(struct device *dev) return false; } -static struct regmap_config meson_regmap_config = { +static const struct regmap_config meson_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -277,7 +279,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) * Remove early framebuffers (ie. simplefb). 
The framebuffer can be * located anywhere in RAM */ - ret = drm_aperture_remove_framebuffers(&meson_driver); + ret = aperture_remove_all_conflicting_devices(meson_driver.name); if (ret) goto free_canvas_vd1_2; @@ -353,7 +355,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto uninstall_irq; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5565f7777529..b75db829b1da 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -272,20 +272,6 @@ static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi, writeb(data, dw_hdmi->hdmitx + addr); } -/* Helper to change specific bits in controller registers */ -static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, - unsigned int addr, - unsigned int mask, - unsigned int val) -{ - unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr); - - data &= ~mask; - data |= val; - - dw_hdmi->data->dwc_write(dw_hdmi, addr, data); -} - /* Bridge */ /* Setup PHY bandwidth modes */ diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index 3096944a8f0a..412dcbea0e2d 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig @@ -2,6 +2,7 @@ config DRM_MGAG200 tristate "Matrox G200" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select I2C @@ -20,4 +21,4 @@ config DRM_MGAG200_DISABLE_WRITECOMBINE performances. This can interfere with real-time tasks; even if they are running on other CPU cores than the graphics output. Enable this option only if you run realtime tasks on a server with a - Matrox G200.
\ No newline at end of file + Matrox G200. diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 9f5925693686..97fd7eb765b4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -6,14 +6,16 @@ * Dave Airlie */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_module.h> @@ -100,6 +102,7 @@ static const struct drm_driver mgag200_driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* @@ -225,7 +228,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *dev; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, mgag200_driver.name); if (ret) return ret; @@ -276,7 +279,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * FIXME: A 24-bit color depth does not work with 24 bpp on * G200ER. Force 32 bpp. */ - drm_fbdev_shmem_setup(dev, 32); + drm_client_setup_with_fourcc(dev, DRM_FORMAT_XRGB8888); return 0; } diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 90c68106b63b..7ec833b6d829 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -6,6 +6,7 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST depends on COMMON_CLK depends on IOMMU_SUPPORT + depends on OF depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n @@ -14,6 +15,7 @@ config DRM_MSM select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_AUX_BUS select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER @@ -92,6 +94,7 @@ config DRM_MSM_DPU bool "Enable DPU support in MSM DRM driver" depends on DRM_MSM select DRM_MSM_MDSS + select DRM_DISPLAY_DSC_HELPER default y help Compile in support for the Display Processing Unit in @@ -113,6 +116,7 @@ config DRM_MSM_DSI depends on DRM_MSM select DRM_PANEL select DRM_MIPI_DSI + select DRM_DISPLAY_DSC_HELPER default y help Choose this option if you have a need for MIPI DSI connector diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 13110fcc46a8..f274d9430cc3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,6 +23,7 @@ adreno-y := \ adreno/a6xx_gpu.o \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ + adreno/a6xx_preempt.o \ adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ @@ -210,6 +211,7 @@ DISPLAY_HEADERS = \ generated/mdp4.xml.h \ generated/mdp5.xml.h \ generated/mdp_common.xml.h \ + generated/mdss.xml.h \ generated/sfpb.xml.h $(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS)) diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 0dc255ddf5ce..379a3d346c30 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if 
(gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b46ff49f47cf..b6df115bb567 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8b4cdf95f445..50c490b492f0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e09044930547..ee89db72e36e 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) unsigned int i, ibs = 0; if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { - gpu->cur_ctx_seqno = 0; + ring->cur_ctx_seqno = 0; a5xx_submit_in_rb(gpu, submit); return; } @@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 7705f8010484..6b91e0bd1514 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -307,7 +307,7 @@ int a5xx_power_init(struct msm_gpu *gpu) else if (adreno_is_a540(adreno_gpu)) a540_lm_setup(gpu); - /* Set up SP/TP power collpase */ + /* Set up SP/TP power collapse */ a5xx_pc_init(gpu); /* Start the GPMU */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index 0312b6ee0356..0c560e84ad5a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -973,6 +973,25 @@ static const struct adreno_info a6xx_gpus[] = { }, .address_space_size = SZ_16G, }, { + .chip_ids = ADRENO_CHIP_IDS(0x06060300), + .family = ADRENO_6XX_GEN4, + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a663_gmu.bin", + }, + .gmem = SZ_1M + SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | + ADRENO_QUIRK_HAS_HW_APRIV, + .init = 
a6xx_gpu_init, + .a6xx = &(const struct a6xx_info) { + .hwcg = a690_hwcg, + .protect = &a660_protect, + .gmu_cgc_mode = 0x00020200, + .prim_fifo_threshold = 0x00300200, + }, + .address_space_size = SZ_16G, + }, { .chip_ids = ADRENO_CHIP_IDS(0x06030500), .family = ADRENO_6XX_GEN4, .fw = { @@ -1281,6 +1300,28 @@ static const u32 a730_protect_regs[] = { }; DECLARE_ADRENO_PROTECT(a730_protect, 48); +static const uint32_t a7xx_pwrup_reglist_regs[] = { + REG_A6XX_UCHE_TRAP_BASE, + REG_A6XX_UCHE_TRAP_BASE + 1, + REG_A6XX_UCHE_WRITE_THRU_BASE, + REG_A6XX_UCHE_WRITE_THRU_BASE + 1, + REG_A6XX_UCHE_GMEM_RANGE_MIN, + REG_A6XX_UCHE_GMEM_RANGE_MIN + 1, + REG_A6XX_UCHE_GMEM_RANGE_MAX, + REG_A6XX_UCHE_GMEM_RANGE_MAX + 1, + REG_A6XX_UCHE_CACHE_WAYS, + REG_A6XX_UCHE_MODE_CNTL, + REG_A6XX_RB_NC_MODE_CNTL, + REG_A6XX_RB_CMP_DBG_ECO_CNTL, + REG_A7XX_GRAS_NC_MODE_CNTL, + REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, + REG_A6XX_UCHE_GBIF_GX_CONFIG, + REG_A6XX_UCHE_CLIENT_PF, + REG_A6XX_TPL1_DBG_ECO_CNTL1, +}; + +DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist); + static const struct adreno_info a7xx_gpus[] = { { .chip_ids = ADRENO_CHIP_IDS(0x07000200), @@ -1315,15 +1356,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = SZ_2M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "a730_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a730_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_cgc_mode = 0x00020000, }, .address_space_size = SZ_16G, + .preempt_record_size = 2860 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */ .family = ADRENO_7XX_GEN2, @@ -1334,16 +1378,19 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "a740_zap.mdt", .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7020100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */ .family = ADRENO_7XX_GEN2, @@ -1354,15 +1401,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .a6xx = &(const struct a6xx_info) { .hwcg = a740_hwcg, .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7050001, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_256G, + .preempt_record_size = 4192 * SZ_1K, }, { .chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */ .family = ADRENO_7XX_GEN3, @@ -1373,15 +1423,18 @@ static const struct adreno_info a7xx_gpus[] = { .gmem = 3 * SZ_1M, .inactive_period = DRM_MSM_INACTIVE_PERIOD, .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT | - ADRENO_QUIRK_HAS_HW_APRIV, + ADRENO_QUIRK_HAS_HW_APRIV | + ADRENO_QUIRK_PREEMPTION, .init = a6xx_gpu_init, .zapfw = "gen70900_zap.mbn", .a6xx = &(const struct a6xx_info) { .protect = &a730_protect, + .pwrup_reglist = &a7xx_pwrup_reglist, .gmu_chipid = 0x7090100, .gmu_cgc_mode = 0x00020202, }, .address_space_size = SZ_16G, + 
.preempt_record_size = 3572 * SZ_1K, } }; DECLARE_ADRENO_GPULIST(a7xx); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 37927bdd6fbe..14db7376c712 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1522,15 +1522,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, irq = platform_get_irq_byname(pdev, name); - ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", name, ret); return ret; } - disable_irq(irq); - return irq; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 94b6c5cab6f4..b4a79f88ccf4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -99,6 +99,7 @@ struct a6xx_gmu { struct completion pd_gate; struct qmp *qmp; + struct a6xx_hfi_msg_bw_table *bw_table; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 702b8d4b3497..019610341df1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -68,6 +68,8 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; @@ -81,12 +83,17 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->preempt_lock, flags); - - /* Make sure everything is posted before making a decision */ - mb(); + /* Update HW if this is the current ring and we are not in preempt */ + if (!a6xx_in_preempt(a6xx_gpu)) { + if (a6xx_gpu->cur_ring == ring) + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + else + ring->restore_wptr = true; + } else { + ring->restore_wptr = true; + } - gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + spin_unlock_irqrestore(&ring->preempt_lock, flags); } static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, @@ -110,7 +117,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) + if (ctx->seqno == ring->cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) @@ -148,12 +155,14 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, /* * Write the new TTBR0 to the memstore. This is good for debugging. 
+ * Needed for preemption */ - OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_PKT7(ring, CP_MEM_WRITE, 5); OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); OUT_RING(ring, lower_32_bits(ttbr)); - OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + OUT_RING(ring, upper_32_bits(ttbr)); + OUT_RING(ring, ctx->seqno); /* * Sync both threads after switching pagetables and enable BR only @@ -229,7 +238,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -278,6 +287,46 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_flush(gpu, ring); } +static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring, + struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue) +{ + u64 preempt_postamble; + + OUT_PKT7(ring, CP_SET_PSEUDO_REG, 12); + + OUT_RING(ring, SMMU_INFO); + /* don't save SMMU, we write the record from the kernel instead */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* privileged and non-secure buffer save */ + OUT_RING(ring, NON_SECURE_SAVE_ADDR); + OUT_RING(ring, lower_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + OUT_RING(ring, upper_32_bits( + a6xx_gpu->preempt_iova[ring->id])); + + /* user context buffer save, seems to be unused by fw */ + OUT_RING(ring, NON_PRIV_SAVE_ADDR); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + OUT_RING(ring, COUNTER); + /* seems OK to set to 0 to disable it */ + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* Emit postamble to clear perfcounters */ + preempt_postamble = a6xx_gpu->preempt_postamble_iova; + + OUT_PKT7(ring, CP_SET_AMBLE, 3); + OUT_RING(ring, lower_32_bits(preempt_postamble)); + OUT_RING(ring, upper_32_bits(preempt_postamble)); + OUT_RING(ring, CP_SET_AMBLE_2_DWORDS( + a6xx_gpu->preempt_postamble_len) | + CP_SET_AMBLE_2_TYPE(KMD_AMBLE_TYPE)); +} + static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; @@ -295,6 +344,13 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_set_pagetable(a6xx_gpu, ring, submit); + /* + * If preemption is enabled, then set the pseudo register for the save + * sequence + */ + if (gpu->nr_rings > 1) + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue); + get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, @@ -306,8 +362,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x101); /* IFPC disable */ - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00d); /* IB1LIST start */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00d); /* IB1LIST start */ + } /* Submit the commands */ for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -338,8 +396,10 @@ static void 
a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) update_shadow_rptr(gpu, ring); } - OUT_PKT7(ring, CP_SET_MARKER, 1); - OUT_RING(ring, 0x00e); /* IB1LIST end */ + if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) { + OUT_PKT7(ring, CP_SET_MARKER, 1); + OUT_RING(ring, 0x00e); /* IB1LIST end */ + } get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); @@ -386,6 +446,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence))); OUT_RING(ring, submit->seqno); + a6xx_gpu->last_seqno[ring->id] = submit->seqno; + /* write the ringbuffer timestamp */ OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27)); @@ -399,10 +461,32 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x100); /* IFPC enable */ + /* If preemption is enabled */ + if (gpu->nr_rings > 1) { + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + + /* + * If dword[2:1] are non-zero, they specify an address for + * the CP to write the value of dword[3] to on preemption + * complete. Write 0 to skip the write + */ + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Data value - not used if the address above is 0 */ + OUT_RING(ring, 0x01); + /* generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + } + + trace_msm_gpu_submit_flush(submit, gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); a6xx_flush(gpu, ring); + + /* Check to see if we need to start preemption */ + a6xx_preempt_trigger(gpu); } static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) @@ -551,6 +635,15 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.macrotile_mode = 1; } + if (adreno_is_a663(gpu)) { + gpu->ubwc_config.highest_bank_bit = 13; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; + gpu->ubwc_config.macrotile_mode = 1; + gpu->ubwc_config.ubwc_swizzle = 0x4; + } + if (adreno_is_7c3(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.amsbc = 1; @@ -609,6 +702,77 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu) adreno_gpu->ubwc_config.macrotile_mode); } +static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + const struct adreno_reglist_list *reglist; + void *ptr = a6xx_gpu->pwrup_reglist_ptr; + struct cpu_gpu_lock *lock = ptr; + u32 *dest = (u32 *)&lock->regs[0]; + int i; + + reglist = adreno_gpu->info->a6xx->pwrup_reglist; + + lock->gpu_req = lock->cpu_req = lock->turn = 0; + lock->ifpc_list_len = 0; + lock->preemption_list_len = reglist->count; + + /* + * For each entry in each of the lists, write the offset and the current + * register value into the GPU buffer + */ + for (i = 0; i < reglist->count; i++) { + *dest++ = reglist->regs[i]; + *dest++ = gpu_read(gpu, reglist->regs[i]); + } + + /* + * The overall register list is composed of + * 1. Static IFPC-only registers + * 2. Static IFPC + preemption registers + * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects) + * + * The first two lists are static. Sizes of these lists are stored as the + * number of pairs in ifpc_list_len and preemption_list_len + * respectively. 
With concurrent binning, some of the perfcounter + * registers being virtualized, the CP needs to know the pipe id to program + * the aperture in order to restore the same. Thus, the third list is a + * dynamic list with triplets as + * (<aperture, shifted 12 bits> <address> <data>), and the length is + * stored as the number of triplets in dynamic_list_len. + */ + lock->dynamic_list_len = 0; +} + +static int a7xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings <= 1) + return 0; + + /* Turn CP protection off */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Generate interrupt on preemption completion */ + OUT_RING(ring, 0x00); + + a6xx_flush(gpu, ring); + + return a6xx_idle(gpu, ring) ? 0 : -EINVAL; +} + static int a6xx_cp_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -640,6 +804,8 @@ static int a6xx_cp_init(struct msm_gpu *gpu) static int a7xx_cp_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[0]; u32 mask; @@ -677,11 +843,11 @@ static int a7xx_cp_init(struct msm_gpu *gpu) /* *Don't* send a power up reg list for concurrent binning (TODO) */ /* Lo address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* Hi address */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova)); /* BIT(31) set => read the regs from the list */ - OUT_RING(ring, 0x00000000); + OUT_RING(ring, BIT(31)); a6xx_flush(gpu, ring); return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
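Note that the IB1LIST start/end markers emitted in a7xx_submit above are guarded by MSM_SUBMITQUEUE_ALLOW_PREEMPT, so preemption points inside the IB1 list are opt-in per submitqueue. A hedged sketch of how userspace would request such a queue through the MSM submitqueue ioctl; the flag comes from the hunks above, while the helper name and error handling are illustrative only:

#include <stdint.h>
#include <xf86drm.h>
#include "drm/msm_drm.h"

/* Ask the kernel for a submitqueue the scheduler may preempt mid-IB1. */
static int create_preemptible_queue(int drm_fd, uint32_t prio)
{
	struct drm_msm_submitqueue req = {
		.flags = MSM_SUBMITQUEUE_ALLOW_PREEMPT,
		.prio = prio,	/* selects the ringbuffer, now up to 4 on a7xx */
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;

	return req.id;	/* use as the queue id for later submits */
}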
@@ -805,6 +971,16 @@ static int a6xx_ucode_load(struct msm_gpu *gpu) msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); } + a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, + &a6xx_gpu->pwrup_reglist_iova); + + if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr)) + return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr); + + msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist"); + return 0; } @@ -864,6 +1040,7 @@ static int hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u64 gmem_range_min; + unsigned int i; int ret; @@ -1072,7 +1249,7 @@ static int hw_init(struct msm_gpu *gpu) if (adreno_is_a690(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); /* Set dualQ + disable afull for A660 GPU */ - else if (adreno_is_a660(adreno_gpu)) + else if (adreno_is_a660(adreno_gpu) || adreno_is_a663(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); else if (adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, @@ -1134,22 +1311,32 @@ static int hw_init(struct msm_gpu *gpu) if (a6xx_gpu->shadow_bo) { gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); + for (unsigned int i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; } /* ..which means "always" on A7xx, also for BV shadow */ if (adreno_is_a7xx(adreno_gpu)) { gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, - rbmemptr(gpu->rb[0], bv_fence)); + rbmemptr(gpu->rb[0], bv_rptr)); } + a6xx_preempt_hw_init(gpu); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - gpu->cur_ctx_seqno = 0; + for (i = 0; i < gpu->nr_rings; i++) + gpu->rb[i]->cur_ctx_seqno = 0; /* Enable the SQE to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); + if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) { + a7xx_patch_pwrup_reglist(gpu); + a6xx_gpu->pwrup_reglist_emitted = true; + } + ret = adreno_is_a7xx(adreno_gpu) ? 
a7xx_cp_init(gpu) : a6xx_cp_init(gpu); if (ret) goto out; @@ -1187,6 +1374,10 @@ static int hw_init(struct msm_gpu *gpu) out: if (adreno_has_gmu_wrapper(adreno_gpu)) return ret; + + /* Last step - yield the ringbuffer */ + a7xx_preempt_start(gpu); + /* * Tell the GMU that we are done touching the GPU and it can start power * management @@ -1564,8 +1755,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) a7xx_sw_fuse_violation_irq(gpu); - if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { msm_gpu_retire(gpu); + a6xx_preempt_trigger(gpu); + } + + if (status & A6XX_RBBM_INT_0_MASK_CP_SW) + a6xx_preempt_irq(gpu); return IRQ_HANDLED; } @@ -2259,6 +2455,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; + extern int enable_preemption; bool is_a7xx; int ret; @@ -2297,7 +2494,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (is_a7xx) + if ((enable_preemption == 1) || (enable_preemption == -1 && + (config->info->quirks & ADRENO_QUIRK_PREEMPTION))) + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4); + else if (is_a7xx) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1); else if (adreno_has_gmu_wrapper(adreno_gpu)) ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1); @@ -2338,6 +2538,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) a6xx_fault_handler); a6xx_calc_ubwc_config(adreno_gpu); + /* Set up the preemption specific bits and pieces for each ringbuffer */ + a6xx_preempt_init(gpu); return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0fb7febf70e7..4aceffb6aae8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -12,15 +12,35 @@ extern bool hang_debug; +struct cpu_gpu_lock { + uint32_t gpu_req; + uint32_t cpu_req; + uint32_t turn; + union { + struct { + uint16_t list_length; + uint16_t list_offset; + }; + struct { + uint8_t ifpc_list_len; + uint8_t preemption_list_len; + uint16_t dynamic_list_len; + }; + }; + uint64_t regs[62]; +}; + /** * struct a6xx_info - a6xx specific information from device table * * @hwcg: hw clock gating register sequence * @protect: CP_PROTECT settings + * @pwrup_reglist: pwrup reglist for preemption */ struct a6xx_info { const struct adreno_reglist *hwcg; const struct adreno_protect *protect; + const struct adreno_reglist_list *pwrup_reglist; u32 gmu_chipid; u32 gmu_cgc_mode; u32 prim_fifo_threshold; @@ -33,6 +53,29 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + void *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS]; + void *preempt_smmu[MSM_GPU_MAX_RINGS]; + uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS]; + + atomic_t preempt_state; + spinlock_t eval_lock; + struct timer_list preempt_timer; + + unsigned int preempt_level; + bool uses_gmem; + bool skip_save_restore; + + struct drm_gem_object *preempt_postamble_bo; + void *preempt_postamble_ptr; + uint64_t preempt_postamble_iova; + uint64_t preempt_postamble_len; + bool postamble_enabled; struct a6xx_gmu gmu; @@ -40,6 +83,11 @@ struct a6xx_gpu { uint64_t shadow_iova; uint32_t *shadow; + struct 
drm_gem_object *pwrup_reglist_bo; + void *pwrup_reglist_ptr; + uint64_t pwrup_reglist_iova; + bool pwrup_reglist_emitted; + bool has_whereami; void __iomem *llc_mmio; @@ -52,6 +100,100 @@ struct a6xx_gpu { #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) /* + * In order to do lockless preemption we use a simple state machine to progress + * through the process. + * + * PREEMPT_NONE - no preemption in progress. Next state START. + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next + * states: TRIGGERED, NONE + * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next + * state: NONE. + * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next + * states: FAULTED, PENDING + * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger + * recovery. Next state: N/A + * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is + * checking the success of the operation. Next state: FAULTED, NONE. + */ + +enum a6xx_preempt_state { + PREEMPT_NONE = 0, + PREEMPT_START, + PREEMPT_FINISH, + PREEMPT_TRIGGERED, + PREEMPT_FAULTED, + PREEMPT_PENDING, +}; + +/* + * struct a6xx_preempt_record is a shared buffer between the microcode and the + * CPU to store the state for preemption. The record itself is much larger + * (2112k) but most of that is used by the CP for storage. + * + * There is a preemption record assigned per ringbuffer. When the CPU triggers a + * preemption, it fills out the record with the useful information (wptr, ring + * base, etc) and the microcode uses that information to set up the CP following + * the preemption. When a ring is switched out, the CP will save the ringbuffer + * state back to the record. In this way, once the records are properly set up + * the CPU can quickly switch back and forth between ringbuffers by only + * updating a few registers (often only the wptr). + * + * These are the CPU aware registers in the record: + * @magic: Must always be 0xAE399D6EUL + * @info: Type of the record - written 0 by the CPU, updated by the CP + * @errno: preemption error record + * @data: Data field in YIELD and SET_MARKER packets, Written and used by CP + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP + * @_pad: Reserved/padding + * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + * @rbase: Value of RB_BASE written by CPU, save/restored by CP + * @counter: GPU address of the storage area for the preemption counters + * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP + */ +struct a6xx_preempt_record { + u32 magic; + u32 info; + u32 errno; + u32 data; + u32 cntl; + u32 rptr; + u32 wptr; + u32 _pad; + u64 rptr_addr; + u64 rbase; + u64 counter; + u64 bv_rptr_addr; +}; + +#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL + +#define PREEMPT_SMMU_INFO_SIZE 4096 + +#define PREEMPT_RECORD_SIZE(adreno_gpu) \ + ((adreno_gpu->info->preempt_record_size) == 0 ? \ + 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size)) + +/* + * The preemption counter block is a storage area for the value of the + * preemption counters that are saved immediately before context switch. We + * append it on to the end of the allocation for the preemption record. 
+ */ +#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4) + +struct a7xx_cp_smmu_info { + u32 magic; + u32 _pad4; + u64 ttbr0; + u32 asid; + u32 context_idr; + u32 context_bank; +}; + +#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL + +/* * Given a register and a count, return a value to program into * REG_CP_PROTECT_REG(n) - this will block both reads and writes for * _len + 1 registers starting at _reg. @@ -108,6 +250,34 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); +void a6xx_preempt_init(struct msm_gpu *gpu); +void a6xx_preempt_hw_init(struct msm_gpu *gpu); +void a6xx_preempt_trigger(struct msm_gpu *gpu); +void a6xx_preempt_irq(struct msm_gpu *gpu); +void a6xx_preempt_fini(struct msm_gpu *gpu); +int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); +void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue); + +/* Return true if we are in a preempt state */ +static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu) +{ + /* + * Make sure the read to preempt_state is ordered with respect to reads + * of other variables before ... + */ + smp_rmb(); + + int preempt_state = atomic_read(&a6xx_gpu->preempt_state); + + /* ... and after. */ + smp_rmb(); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_FINISH); +} + void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, bool suspended); unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index cdb3f6e74d3e..cb8844ed46b2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -478,6 +478,37 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) msg->cnoc_cmds_data[1][0] = 0x60000001; } +static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; + + msg->ddr_cmds_addrs[0] = 0x50004; + msg->ddr_cmds_addrs[1] = 0x50000; + msg->ddr_cmds_addrs[2] = 0x500b4; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x50058; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) { /* @@ -630,32 +661,44 @@ static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) { - struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_hfi_msg_bw_table *msg; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + if (gmu->bw_table) + goto send; + + msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + if (adreno_is_a618(adreno_gpu)) - a618_build_bw_table(&msg); + a618_build_bw_table(msg); else if (adreno_is_a619(adreno_gpu)) - a619_build_bw_table(&msg); + a619_build_bw_table(msg); else if (adreno_is_a640_family(adreno_gpu)) - 
a640_build_bw_table(&msg); + a640_build_bw_table(msg); else if (adreno_is_a650(adreno_gpu)) - a650_build_bw_table(&msg); + a650_build_bw_table(msg); else if (adreno_is_7c3(adreno_gpu)) - adreno_7c3_build_bw_table(&msg); + adreno_7c3_build_bw_table(msg); else if (adreno_is_a660(adreno_gpu)) - a660_build_bw_table(&msg); + a660_build_bw_table(msg); + else if (adreno_is_a663(adreno_gpu)) + a663_build_bw_table(msg); else if (adreno_is_a690(adreno_gpu)) - a690_build_bw_table(&msg); + a690_build_bw_table(msg); else if (adreno_is_a730(adreno_gpu)) - a730_build_bw_table(&msg); + a730_build_bw_table(msg); else if (adreno_is_a740_family(adreno_gpu)) - a740_build_bw_table(&msg); + a740_build_bw_table(msg); else - a6xx_build_bw_table(&msg); + a6xx_build_bw_table(msg); + + gmu->bw_table = msg; - return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), +send: + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)), NULL, 0); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c new file mode 100644 index 000000000000..2fd4e39f618f --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2023 Collabora, Ltd. */ +/* Copyright (c) 2024 Valve Corporation */ + +#include "msm_gem.h" +#include "a6xx_gpu.h" +#include "a6xx_gmu.xml.h" +#include "msm_mmu.h" +#include "msm_gpu_trace.h" + +/* + * Try to transition the preemption state from old to new. Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu, + enum a6xx_preempt_state old, enum a6xx_preempt_state new) +{ + enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a6xx_gpu *gpu, + enum a6xx_preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... 
and after */ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + spin_lock_irqsave(&ring->preempt_lock, flags); + + if (ring->restore_wptr) { + wptr = get_wptr(ring); + + gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); + + ring->restore_wptr = false; + } + + spin_unlock_irqrestore(&ring->preempt_lock, flags); +} + +/* Return the highest priority ringbuffer with something in it */ +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a6xx_gpu->cur_ring) + empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i]; + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + if (!empty) + return ring; + } + + return NULL; +} + +static void a6xx_preempt_timer(struct timer_list *t) +{ + struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer); + struct msm_gpu *gpu = &a6xx_gpu->base.base; + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) + return; + + dev_err(dev->dev, "%s: preemption timed out\n", gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + u32 count = 0; + + postamble[count++] = PKT7(CP_REG_RMW, 3); + postamble[count++] = REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD; + postamble[count++] = 0; + postamble[count++] = 1; + + postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6); + postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ); + postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS); + postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0); + postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1); + postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1); + postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0); + + a6xx_gpu->preempt_postamble_len = count; + + a6xx_gpu->postamble_enabled = true; +} + +static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu) +{ + u32 *postamble = a6xx_gpu->preempt_postamble_ptr; + + /* + * Disable the postamble by replacing the first packet header with a NOP + * that covers the whole buffer. + */ + *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1)); + + a6xx_gpu->postamble_enabled = false; +} + +void a6xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + del_timer(&a6xx_gpu->preempt_timer); + + /* + * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL + * to zero before firing the interrupt, but there is a non-zero chance + * of a hardware condition or a software race that could set it again + * before we have a chance to finish. 
If that happens, log and go for + recovery + */ + status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status & A6XX_CP_CONTEXT_SWITCH_CNTL_STOP)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! irq\n"); + set_preempt_state(a6xx_gpu, PREEMPT_FAULTED); + dev_err(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); + return; + } + + a6xx_gpu->cur_ring = a6xx_gpu->next_ring; + a6xx_gpu->next_ring = NULL; + + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + + update_wptr(gpu, a6xx_gpu->cur_ring); + + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id); + + /* + * Retrigger preemption to avoid a deadlock that might occur when preemption + * is skipped due to it being already in flight when requested. + */ + a6xx_preempt_trigger(gpu); +} + +void a6xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings == 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i]; + + record_ptr->wptr = 0; + record_ptr->rptr = 0; + record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]); + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rbase = gpu->rb[i]->iova; + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0); + + /* Enable the GMEM save/restore feature for preemption */ + gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1); + + /* Reset the preemption state */ + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + + spin_lock_init(&a6xx_gpu->eval_lock); + + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; +} + +void a6xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + unsigned int cntl; + bool sysprof; + + if (gpu->nr_rings == 1) + return; + + /* + * Lock to make sure another thread attempting preemption doesn't skip it + * while we are still evaluating the next ring. This makes sure that if we + * abort, the other thread still starts preemption, avoiding a soft lockup. + */ + spin_lock_irqsave(&a6xx_gpu->eval_lock, flags); + + /* + * Try to start preemption by moving from NONE to START. 
If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) { + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + cntl = A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level); + + if (a6xx_gpu->skip_save_restore) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE; + + if (a6xx_gpu->uses_gmem) + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM; + + cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a6xx_gpu->cur_ring == ring)) { + set_preempt_state(a6xx_gpu, PREEMPT_FINISH); + update_wptr(gpu, a6xx_gpu->cur_ring); + set_preempt_state(a6xx_gpu, PREEMPT_NONE); + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + return; + } + + spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags); + + spin_lock_irqsave(&ring->preempt_lock, flags); + + struct a7xx_cp_smmu_info *smmu_info_ptr = + a6xx_gpu->preempt_smmu[ring->id]; + struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id]; + u64 ttbr0 = ring->memptrs->ttbr0; + u32 context_idr = ring->memptrs->context_idr; + + smmu_info_ptr->ttbr0 = ttbr0; + smmu_info_ptr->context_idr = context_idr; + record_ptr->wptr = get_wptr(ring); + + /* + * The GPU will write the wptr we set above when we preempt. Reset + * restore_wptr to make sure that we don't write WPTR to the same + * thing twice. It's still possible subsequent submissions will update + * wptr again, in which case they will set the flag to true. This has + * to be protected by the lock for setting the flag and updating wptr + * to be atomic. 
+ */ + ring->restore_wptr = false; + + trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id); + + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, + a6xx_gpu->preempt_smmu_iova[ring->id]); + + gpu_write64(gpu, + REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR, + a6xx_gpu->preempt_iova[ring->id]); + + a6xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Enable or disable postamble as needed */ + sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + + if (!sysprof && !a6xx_gpu->postamble_enabled) + preempt_prepare_postamble(a6xx_gpu); + + if (sysprof && a6xx_gpu->postamble_enabled) + preempt_disable_postamble(a6xx_gpu); + + /* Set the preemption state to triggered */ + set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED); + + /* Trigger the preemption */ + gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl); +} + +static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, + struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct drm_gem_object *bo = NULL; + phys_addr_t ttbr; + u64 iova = 0; + void *ptr; + int asid; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_RECORD_SIZE(adreno_gpu), + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_RECORD_SIZE(adreno_gpu)); + + msm_gem_object_set_name(bo, "preempt_record ring%d", ring->id); + + a6xx_gpu->preempt_bo[ring->id] = bo; + a6xx_gpu->preempt_iova[ring->id] = iova; + a6xx_gpu->preempt[ring->id] = ptr; + + struct a6xx_preempt_record *record_ptr = ptr; + + ptr = msm_gem_kernel_new(gpu->dev, + PREEMPT_SMMU_INFO_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + memset(ptr, 0, PREEMPT_SMMU_INFO_SIZE); + + msm_gem_object_set_name(bo, "preempt_smmu_info ring%d", ring->id); + + a6xx_gpu->preempt_smmu_bo[ring->id] = bo; + a6xx_gpu->preempt_smmu_iova[ring->id] = iova; + a6xx_gpu->preempt_smmu[ring->id] = ptr; + + struct a7xx_cp_smmu_info *smmu_info_ptr = ptr; + + msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid); + + smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC; + smmu_info_ptr->ttbr0 = ttbr; + smmu_info_ptr->asid = 0xdecafbad; + smmu_info_ptr->context_idr = 0; + + /* Set up the defaults on the preemption record */ + record_ptr->magic = A6XX_PREEMPT_RECORD_MAGIC; + record_ptr->info = 0; + record_ptr->data = 0; + record_ptr->rptr = 0; + record_ptr->wptr = 0; + record_ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; + record_ptr->rbase = ring->iova; + record_ptr->counter = 0; + record_ptr->bv_rptr_addr = rbmemptr(ring, bv_rptr); + + return 0; +} + +void a6xx_preempt_fini(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) + msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace); +} + +void a6xx_preempt_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings <= 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + if (preempt_init_ring(a6xx_gpu, gpu->rb[i])) + goto fail; + } + + /* TODO: make this configurable? 
*/ + a6xx_gpu->preempt_level = 1; + a6xx_gpu->uses_gmem = 1; + a6xx_gpu->skip_save_restore = 1; + + a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev, + PAGE_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY, + gpu->aspace, &a6xx_gpu->preempt_postamble_bo, + &a6xx_gpu->preempt_postamble_iova); + + if (IS_ERR(a6xx_gpu->preempt_postamble_ptr)) + goto fail; + + preempt_prepare_postamble(a6xx_gpu); + + timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0); + + return; +fail: + /* + * On any failure our adventure is over. Clean up and + * set nr_rings to 1 to force preemption off + */ + a6xx_preempt_fini(gpu); + gpu->nr_rings = 1; + + DRM_DEV_ERROR(&gpu->pdev->dev, + "preemption init failed, disabling preemption\n"); + + return; +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index cfc74a9e2646..9ffe91920fbf 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -20,6 +20,10 @@ bool allow_vram_carveout = false; MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); +int enable_preemption = -1; +MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on, 0=disable, -1=auto (default))"); +module_param(enable_preemption, int, 0600); + extern const struct adreno_gpulist a2xx_gpulist; extern const struct adreno_gpulist a3xx_gpulist; extern const struct adreno_gpulist a4xx_gpulist; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 465a4cd14a43..75f5367e73ca 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -533,7 +533,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu) if (!adreno_gpu->info->fw[i]) continue; - /* Skip loading GMU firwmare with GMU Wrapper */ + /* Skip loading GMU firmware with GMU Wrapper */ if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU) continue; @@ -572,8 +572,19 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, int adreno_hw_init(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + VERB("%s", gpu->name); + if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 && + qcom_scm_set_gpu_smmu_aperture_is_available()) { + /* We currently always use context bank 0, so hard code this */ + ret = qcom_scm_set_gpu_smmu_aperture(0); + if (ret) + DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret); + } + for (int i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 58d7e7915c57..e71f420f8b3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -56,6 +56,7 @@ enum adreno_family { #define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2) #define ADRENO_QUIRK_HAS_HW_APRIV BIT(3) #define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4) +#define ADRENO_QUIRK_PREEMPTION BIT(5) /* Helper for formatting the chip_id in the way that userspace tools like * crashdec expect. @@ -111,6 +112,7 @@ struct adreno_info { * {SHRT_MAX, 0} sentinel. */ struct adreno_speedbin *speedbins; + u64 preempt_record_size; }; #define ADRENO_CHIP_IDS(tbl...) 
(uint32_t[]) { tbl, 0 } @@ -156,6 +158,19 @@ static const struct adreno_protect name = { \ .count_max = __count_max, \ }; +struct adreno_reglist_list { + /** @regs: List of registers **/ + const u32 *regs; + /** @count: Number of registers in the list **/ + u32 count; +}; + +#define DECLARE_ADRENO_REGLIST_LIST(name) \ +static const struct adreno_reglist_list name = { \ + .regs = name ## _regs, \ + .count = ARRAY_SIZE(name ## _regs), \ +}; + struct adreno_gpu { struct msm_gpu base; const struct adreno_info *info; @@ -455,6 +470,11 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu) return adreno_is_revn(gpu, 680); } +static inline int adreno_is_a663(const struct adreno_gpu *gpu) +{ + return gpu->info->chip_ids[0] == 0x06060300; +} + static inline int adreno_is_a690(const struct adreno_gpu *gpu) { return gpu->info->chip_ids[0] == 0x06090000; } @@ -656,12 +676,15 @@ OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) OUT_RING(ring, PKT4(regindx, cnt)); } +#define PKT7(opcode, cnt) \ + (CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \ + ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)) + static inline void OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) { adreno_wait_ring(ring, cnt + 1); - OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | - ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); + OUT_RING(ring, PKT7(opcode, cnt)); } struct msm_gpu *a2xx_gpu_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h new file mode 100644 index 000000000000..ab3dfb0b374e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_14_MSM8937_H +#define _DPU_1_14_MSM8937_H + +static const struct dpu_caps msm8937_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8937_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8937_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8937_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = 
&dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8937_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8937_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8937_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8937_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8937_perf_data = { + .max_bw_low = 3100000, + .max_bw_high = 3100000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8937_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 14, +}; + +const struct dpu_mdss_cfg dpu_msm8937_cfg = { + .mdss_ver = &msm8937_mdss_ver, + .caps = &msm8937_dpu_caps, + .mdp = msm8937_mdp, + .ctl_count = ARRAY_SIZE(msm8937_ctl), + .ctl = msm8937_ctl, + .sspp_count = ARRAY_SIZE(msm8937_sspp), + .sspp = msm8937_sspp, + .mixer_count = ARRAY_SIZE(msm8937_lm), + .mixer = 
msm8937_lm, + .dspp_count = ARRAY_SIZE(msm8937_dspp), + .dspp = msm8937_dspp, + .pingpong_count = ARRAY_SIZE(msm8937_pp), + .pingpong = msm8937_pp, + .intf_count = ARRAY_SIZE(msm8937_intf), + .intf = msm8937_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8937_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h new file mode 100644 index 000000000000..6bdaecca6761 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_15_MSM8917_H +#define _DPU_1_15_MSM8917_H + +static const struct dpu_caps msm8917_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 16 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8917_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8917_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8917_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8917_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, +}; + +static const struct dpu_pingpong_cfg msm8917_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, +}; + +static const struct dpu_dspp_cfg msm8917_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8917_intf[] = { + { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = 
INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8917_perf_data = { + .max_bw_low = 1800000, + .max_bw_high = 1800000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8917_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 15, +}; + +const struct dpu_mdss_cfg dpu_msm8917_cfg = { + .mdss_ver = &msm8917_mdss_ver, + .caps = &msm8917_dpu_caps, + .mdp = msm8917_mdp, + .ctl_count = ARRAY_SIZE(msm8917_ctl), + .ctl = msm8917_ctl, + .sspp_count = ARRAY_SIZE(msm8917_sspp), + .sspp = msm8917_sspp, + .mixer_count = ARRAY_SIZE(msm8917_lm), + .mixer = msm8917_lm, + .dspp_count = ARRAY_SIZE(msm8917_dspp), + .dspp = msm8917_dspp, + .pingpong_count = ARRAY_SIZE(msm8917_pp), + .pingpong = msm8917_pp, + .intf_count = ARRAY_SIZE(msm8917_intf), + .intf = msm8917_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8917_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h new file mode 100644 index 000000000000..14f36ea6ad0e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef _DPU_1_16_MSM8953_H +#define _DPU_1_16_MSM8953_H + +static const struct dpu_caps msm8953_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .max_linewidth = DEFAULT_DPU_LINE_WIDTH, + .pixel_ram_size = 40 * 1024, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8953_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8953_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8953_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 
0x4000, .len = 0x150, + .features = VIG_MSM8953_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8953_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR), + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, +}; + +static const struct dpu_lm_cfg msm8953_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + }, +}; + +static const struct dpu_pingpong_cfg msm8953_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_dspp_cfg msm8953_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8953_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 14, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8953_perf_data = { + .max_bw_low = 3400000, + .max_bw_high = 3400000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 14, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry 
= ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8953_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 16, +}; + +const struct dpu_mdss_cfg dpu_msm8953_cfg = { + .mdss_ver = &msm8953_mdss_ver, + .caps = &msm8953_dpu_caps, + .mdp = msm8953_mdp, + .ctl_count = ARRAY_SIZE(msm8953_ctl), + .ctl = msm8953_ctl, + .sspp_count = ARRAY_SIZE(msm8953_sspp), + .sspp = msm8953_sspp, + .mixer_count = ARRAY_SIZE(msm8953_lm), + .mixer = msm8953_lm, + .dspp_count = ARRAY_SIZE(msm8953_dspp), + .dspp = msm8953_dspp, + .pingpong_count = ARRAY_SIZE(msm8953_pp), + .pingpong = msm8953_pp, + .intf_count = ARRAY_SIZE(msm8953_intf), + .intf = msm8953_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8953_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h new file mode 100644 index 000000000000..491f6f5827d1 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Limited + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_1_7_MSM8996_H +#define _DPU_1_7_MSM8996_H + +static const struct dpu_caps msm8996_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x7, + .has_src_split = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_mdp_cfg msm8996_mdp[] = { + { + .name = "top_0", + .base = 0x0, .len = 0x454, + .features = BIT(DPU_MDP_VSYNC_SEL), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB2] = { .reg_off = 0x2bc, .bit_off = 4 }, + [DPU_CLK_CTRL_RGB3] = { .reg_off = 0x2c4, .bit_off = 4 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 }, + [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 }, + }, + }, +}; + +static const struct dpu_ctl_cfg msm8996_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x64, + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x64, + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x64, + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0x64, + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0x64, + }, +}; + +static const struct dpu_sspp_cfg msm8996_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = 
DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x150, + .features = VIG_MSM8996_MASK, + .sblk = &dpu_vig_sblk_qseed2, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_4", .id = SSPP_RGB0, + .base = 0x14000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 1, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB0, + }, { + .name = "sspp_5", .id = SSPP_RGB1, + .base = 0x16000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 5, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB1, + }, { + .name = "sspp_6", .id = SSPP_RGB2, + .base = 0x18000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 9, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB2, + }, { + .name = "sspp_7", .id = SSPP_RGB3, + .base = 0x1a000, .len = 0x150, + .features = RGB_MSM8996_MASK, + .sblk = &dpu_rgb_sblk, + .xin_id = 13, + .type = SSPP_TYPE_RGB, + .clk_ctrl = DPU_CLK_CTRL_RGB3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 2, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x150, + .features = DMA_MSM8996_MASK, + .sblk = &dpu_dma_sblk, + .xin_id = 10, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, +}; + +static const struct dpu_lm_cfg msm8996_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_2, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x320, + .features = MIXER_MSM8998_MASK, + .sblk = &msm8998_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + }, +}; + +static const struct dpu_pingpong_cfg msm8996_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x70000, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x70800, .len = 0xd4, + .features = PINGPONG_MSM8996_TE2_MASK, + .sblk = &msm8996_pp_sblk_te, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x71000, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14), + }, { + .name = 
"pingpong_3", .id = PINGPONG_3, + .base = 0x71800, .len = 0xd4, + .features = PINGPONG_MSM8996_MASK, + .sblk = &msm8996_pp_sblk, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15), + }, +}; + +static const struct dpu_dsc_cfg msm8996_dsc[] = { + { + .name = "dsc_0", .id = DSC_0, + .base = 0x80000, .len = 0x140, + }, { + .name = "dsc_1", .id = DSC_1, + .base = 0x80400, .len = 0x140, + }, +}; + +static const struct dpu_dspp_cfg msm8996_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &msm8998_dspp_sblk, + }, +}; + +static const struct dpu_intf_cfg msm8996_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x6a000, .len = 0x268, + .type = INTF_NONE, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x6a800, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x6b000, .len = 0x268, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = -1, + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x6b800, .len = 0x268, + .type = INTF_HDMI, + .prog_fetch_lines_worst_case = 25, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + .intr_tear_rd_ptr = -1, + }, +}; + +static const struct dpu_perf_cfg msm8996_perf_data = { + .max_bw_low = 9600000, + .max_bw_high = 9600000, + .min_core_ib = 2400000, + .min_llcc_ib = 0, /* No LLCC on this SoC */ + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 21, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version msm8996_mdss_ver = { + .core_major_ver = 1, + .core_minor_ver = 7, +}; + +const struct dpu_mdss_cfg dpu_msm8996_cfg = { + .mdss_ver = &msm8996_mdss_ver, + .caps = &msm8996_dpu_caps, + .mdp = msm8996_mdp, + .ctl_count = ARRAY_SIZE(msm8996_ctl), + .ctl = msm8996_ctl, + .sspp_count = ARRAY_SIZE(msm8996_sspp), + .sspp = msm8996_sspp, + .mixer_count = ARRAY_SIZE(msm8996_lm), + .mixer = msm8996_lm, + .dspp_count = ARRAY_SIZE(msm8996_dspp), + .dspp = msm8996_dspp, + .pingpong_count = ARRAY_SIZE(msm8996_pp), + .pingpong = 
msm8996_pp, + .dsc_count = ARRAY_SIZE(msm8996_dsc), + .dsc = msm8996_dsc, + .intf_count = ARRAY_SIZE(msm8996_intf), + .intf = msm8996_intf, + .vbif_count = ARRAY_SIZE(msm8996_vbif), + .vbif = msm8996_vbif, + .perf = &msm8996_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 1d3e9666c741..64c94e919a69 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -157,18 +157,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = { .lm_pair = LM_5, .pingpong = PINGPONG_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x47000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x48000, .len = 0x320, - .features = MIXER_MSM8998_MASK, - .sblk = &msm8998_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_MSM8998_MASK, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 7a23389a5732..72bd4f7e9e50 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -156,25 +156,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = { .pingpong = PINGPONG_2, .dspp = DSPP_2, }, { - .name = "lm_3", .id = LM_3, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - .dspp = DSPP_3, - }, { - .name = "lm_4", .id = LM_4, - .base = 0x0, .len = 0x320, - .features = MIXER_SDM845_MASK, - .sblk = &sdm845_lm_sblk, - .pingpong = PINGPONG_NONE, - }, { .name = "lm_5", .id = LM_5, .base = 0x49000, .len = 0x320, .features = MIXER_SDM845_MASK, .sblk = &sdm845_lm_sblk, .lm_pair = LM_2, .pingpong = PINGPONG_3, + .dspp = DSPP_3, }, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h new file mode 100644 index 000000000000..907b4d7ceb47 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DPU_8_4_SA8775P_H +#define _DPU_8_4_SA8775P_H + +static const struct dpu_caps sa8775p_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 5120, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg sa8775p_mdp = { + .name = "top_0", + .base = 0x0, .len = 0x494, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 }, + [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 }, + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ +static const struct dpu_ctl_cfg sa8775p_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x204, + .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x204, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_sspp_cfg sa8775p_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG1, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG2, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x32c, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_1, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG3, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x32c, + .features = DMA_SDM845_MASK_SDMA, + 
.sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA2, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x32c, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA3, + }, +}; + +static const struct dpu_lm_cfg sa8775p_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + .dspp = DSPP_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + .dspp = DSPP_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, +}; + +static const struct dpu_dspp_cfg sa8775p_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg sa8775p_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 
0x6e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_6", .id = PINGPONG_6, + .base = 0x65800, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_7", .id = PINGPONG_7, + .base = 0x65c00, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, +}; + +static const struct dpu_merge_3d_cfg sa8775p_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x8, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x8, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x8, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x65f00, .len = 0x8, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains two hard-slice + * DSC encoders, so both share the same base address but have their own + * sub-block addresses. + */ +static const struct dpu_dsc_cfg sa8775p_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_2_0", .id = DSC_4, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_2_1", .id = DSC_5, + .base = 0x82000, .len = 0x4, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, +}; + +static const struct dpu_wb_cfg sa8775p_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb_yuv, + .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv), + .clk_ctrl = DPU_CLK_CTRL_WB2, + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */ +static const struct dpu_intf_cfg sa8775p_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = 
DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, { + .name = "intf_4", .id = INTF_4, + .base = 0x38000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21), + }, { + .name = "intf_6", .id = INTF_6, + .base = 0x3A000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), + }, { + .name = "intf_7", .id = INTF_7, + .base = 0x3b000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19), + }, { + .name = "intf_8", .id = INTF_8, + .base = 0x3c000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_NONE, + .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */ + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13), + }, +}; + +static const struct dpu_perf_cfg sa8775p_perf_data = { + .max_bw_low = 13600000, + .max_bw_high = 18200000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + /* FIXME: lut tables */ + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xfff0, 0x1}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile), + .entries = sm6350_qos_linear_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version sa8775p_mdss_ver = { + .core_major_ver = 8, + .core_minor_ver = 4, +}; + +const struct dpu_mdss_cfg dpu_sa8775p_cfg = { + .mdss_ver = &sa8775p_mdss_ver, + .caps = &sa8775p_dpu_caps, + .mdp = &sa8775p_mdp, + .cdm = &sc7280_cdm, + .ctl_count = ARRAY_SIZE(sa8775p_ctl), + .ctl = sa8775p_ctl, + .sspp_count = ARRAY_SIZE(sa8775p_sspp), + .sspp = sa8775p_sspp, + .mixer_count = ARRAY_SIZE(sa8775p_lm), + .mixer = sa8775p_lm, + .dspp_count = ARRAY_SIZE(sa8775p_dspp), + .dspp = sa8775p_dspp, + .pingpong_count = ARRAY_SIZE(sa8775p_pp), + .pingpong = sa8775p_pp, + .dsc_count = ARRAY_SIZE(sa8775p_dsc), + .dsc = sa8775p_dsc, + .merge_3d_count = ARRAY_SIZE(sa8775p_merge_3d), + .merge_3d = sa8775p_merge_3d, + .wb_count = ARRAY_SIZE(sa8775p_wb), + .wb = sa8775p_wb, + .intf_count = ARRAY_SIZE(sa8775p_intf), + .intf = sa8775p_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .perf = &sa8775p_perf_data, +}; + +#endif 
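The dpu_core_perf.c hunk further down casts the first operand of the crtc_clk product to u64 before multiplying. A minimal standalone sketch of the overflow that cast avoids — not kernel code, and the mode numbers are hypothetical, chosen only so the 32-bit product exceeds INT_MAX:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical large mode: 5120 wide, vtotal 4500, 120 Hz */
	int vtotal = 4500, hdisplay = 5120, vrefresh = 120;

	/*
	 * Evaluated in plain int arithmetic, 4500 * 5120 * 120 =
	 * 2,764,800,000, which exceeds INT_MAX (2,147,483,647) and is
	 * a signed overflow (undefined behavior in C).
	 */
	uint64_t crtc_clk = (uint64_t)vtotal * hdisplay * vrefresh;

	printf("crtc_clk = %llu\n", (unsigned long long)crtc_clk);
	return 0;
}

Casting only the final result would not help; widening the first operand makes every multiplication in the chain execute in 64-bit arithmetic.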
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h index 7c286bafb948..e7183cf05776 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h @@ -8,72 +8,26 @@ #include "dpu_kms.h" #include "dpu_hw_interrupts.h" -/** - * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_preinstall(struct msm_kms *kms); -/** - * dpu_core_irq_uninstall - uninstall core IRQ handler - * @kms: MSM KMS handle - * @return: none - */ void dpu_core_irq_uninstall(struct msm_kms *kms); -/** - * dpu_core_irq - core IRQ handler - * @kms: MSM KMS handle - * @return: interrupt handling status - */ irqreturn_t dpu_core_irq(struct msm_kms *kms); -/** - * dpu_core_irq_read - IRQ helper function for reading IRQ status - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: non-zero if irq detected; otherwise no irq detected - */ u32 dpu_core_irq_read( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_core_irq_register_callback - For registering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @irq_cb: IRQ callback funcion. - * @irq_arg: IRQ callback argument. - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. - */ int dpu_core_irq_register_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), void *irq_arg); -/** - * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ - * interrupt - * @dpu_kms: DPU handle - * @irq_idx: irq index - * @return: 0 for success registering callback, otherwise failure - * - * This function supports registration of multiple callbacks for each interrupt. 
- */ int dpu_core_irq_unregister_callback( struct dpu_kms *dpu_kms, unsigned int irq_idx); -/** - * dpu_debugfs_core_irq_init - register core irq debugfs - * @dpu_kms: pointer to kms - * @parent: debugfs directory root - */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 68fae048a9a8..6f0a37f954fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg, mode = &state->adjusted_mode; - crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); + crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); drm_atomic_crtc_for_each_plane(plane, crtc) { pstate = to_dpu_plane_state(plane->state); @@ -140,6 +140,12 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf, perf->max_per_pipe_ib, perf->bw_ctl); } +/** + * dpu_core_perf_crtc_check - validate performance of the given crtc state + * @crtc: Pointer to crtc + * @state: Pointer to new crtc state + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -301,6 +307,12 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms) return clk_rate; } +/** + * dpu_core_perf_crtc_update - update performance of the given crtc + * @crtc: Pointer to crtc + * @params_changed: true if crtc parameters are modified + * return: zero if success, or error code otherwise + */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed) { @@ -446,6 +458,11 @@ static const struct file_operations dpu_core_perf_mode_fops = { .write = _dpu_core_perf_mode_write, }; +/** + * dpu_core_perf_debugfs_init - initialize debugfs for core performance context + * @dpu_kms: Pointer to the dpu_kms struct + * @parent: Pointer to parent debugfs + */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) { struct dpu_core_perf *perf = &dpu_kms->perf; @@ -482,6 +499,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) } #endif +/** + * dpu_core_perf_init - initialize the given core performance context + * @perf: Pointer to core performance context + * @perf_cfg: Pointer to platform performance configuration + * @max_core_clk_rate: Maximum core clock rate + */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h index 4186977390bd..451bf8021114 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h @@ -54,47 +54,20 @@ struct dpu_core_perf { u64 fix_core_ab_vote; }; -/** - * dpu_core_perf_crtc_check - validate performance of the given crtc state - * @crtc: Pointer to crtc - * @state: Pointer to new crtc state - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state); -/** - * dpu_core_perf_crtc_update - update performance of the given crtc - * @crtc: Pointer to crtc - * @params_changed: true if crtc parameters are modified - * return: zero if success, or error code otherwise - */ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, int params_changed); -/** - * 
dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc - * @crtc: Pointer to crtc - */ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc); -/** - * dpu_core_perf_init - initialize the given core performance context - * @perf: Pointer to core performance context - * @perf_cfg: Pointer to platform performance configuration - * @max_core_clk_rate: Maximum core clock rate - */ int dpu_core_perf_init(struct dpu_core_perf *perf, const struct dpu_perf_cfg *perf_cfg, unsigned long max_core_clk_rate); struct dpu_kms; -/** - * dpu_core_perf_debugfs_init - initialize debugfs for core performance context - * @dpu_kms: Pointer to the dpu_kms struct - * @debugfs_parent: Pointer to parent debugfs - */ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent); #endif /* _DPU_CORE_PERF_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index db6c57900781..9f6ffd344693 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -572,6 +572,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev->event_lock, flags); } +/** + * dpu_crtc_get_intf_mode - get interface mode of the given crtc + * @crtc: Pointer to crtc + */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -594,6 +598,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) return INTF_MODE_NONE; } +/** + * dpu_crtc_vblank_callback - called on vblank irq, issues completion events + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -704,6 +712,10 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event) kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); } +/** + * dpu_crtc_complete_commit - callback signalling completion of current commit + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); @@ -934,6 +946,10 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) return rc; } +/** + * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -1230,6 +1246,24 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, return 0; } +static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + + /* + * max crtc width is equal to the max mixer width * 2 and max height is 4K + */ + return drm_mode_validate_size(mode, + 2 * dpu_kms->catalog->caps->max_mixer_width, + 4096); +} + +/** + * dpu_crtc_vblank - enable or disable vblanks for this crtc + * @crtc: Pointer to drm crtc object + * @en: true to enable vblanks, false to disable + */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -1445,10 +1479,19 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { .atomic_check = dpu_crtc_atomic_check, .atomic_begin = dpu_crtc_atomic_begin, .atomic_flush = dpu_crtc_atomic_flush, + .mode_valid = dpu_crtc_mode_valid, .get_scanout_position = dpu_crtc_get_scanout_position, }; -/* initialize crtc */ +/** + * dpu_crtc_init - create a new crtc object + * @dev: dpu device + * @plane: base plane + *
@cursor: cursor plane + * @return: new crtc object or error + * + * initialize CRTC + */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index febc3e764a63..0b148f3ce0d7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -239,55 +239,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL; } -/** - * dpu_crtc_vblank - enable or disable vblanks for this crtc - * @crtc: Pointer to drm crtc object - * @en: true to enable vblanks, false to disable - */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en); -/** - * dpu_crtc_vblank_callback - called on vblank irq, issues completion events - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc); -/** - * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc); -/** - * dpu_crtc_complete_commit - callback signalling completion of current commit - * @crtc: Pointer to drm crtc object - */ void dpu_crtc_complete_commit(struct drm_crtc *crtc); -/** - * dpu_crtc_init - create a new crtc object - * @dev: dpu device - * @plane: base plane - * @cursor: cursor plane - * @Return: new crtc object or error - */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor); -/** - * dpu_crtc_register_custom_event - api for enabling/disabling crtc event - * @kms: Pointer to dpu_kms - * @crtc_drm: Pointer to crtc object - * @event: Event that client is interested - * @en: Flag to enable/disable the event - */ -int dpu_crtc_register_custom_event(struct dpu_kms *kms, - struct drm_crtc *crtc_drm, u32 event, bool en); - -/** - * dpu_crtc_get_intf_mode - get interface mode of the given crtc - * @crtc: Pointert to crtc - */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc); /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index bd3698bf0cf7..83de7564e2c1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -217,6 +217,10 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = { 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 }; +/** + * dpu_encoder_get_drm_fmt - return DRM fourcc format + * @phys_enc: Pointer to physical encoder structure + */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -235,6 +239,11 @@ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc) return DRM_FORMAT_RGB888; } +/** + * dpu_encoder_needs_periph_flush - return true if physical encoder requires + * peripheral flush + * @phys_enc: Pointer to physical encoder structure + */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *drm_enc; @@ -253,6 +262,10 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc) msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode); } +/** + * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc; @@ -272,6 +285,11 @@ bool 
dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) return false; } +/** + * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled + * for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) { const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -279,6 +297,12 @@ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) return dpu_enc->dsc ? true : false; } +/** + * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained + * in virtual encoder that can collect CRC values + * @drm_enc: Pointer to previously created drm encoder structure + * Returns: Number of physical encoders for given drm encoder + */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -297,6 +321,10 @@ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) return num_intf; } +/** + * dpu_encoder_setup_misr - enable misr calculations + * @drm_enc: Pointer to previously created drm encoder structure + */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -315,6 +343,13 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_get_crc - get the crc value from interface blocks + * @drm_enc: Pointer to previously created drm encoder structure + * @crcs: array to fill with CRC data + * @pos: offset into the @crcs array + * Returns: 0 on success, error otherwise + */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) { struct dpu_encoder_virt *dpu_enc; @@ -385,6 +420,12 @@ static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) } } +/** + * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has + * timed out, including reporting frame error event to crtc and debug dump + * @phys_enc: Pointer to physical encoder structure + * @intr_idx: Failing interrupt index + */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx) { @@ -402,6 +443,15 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, u32 irq_idx, struct dpu_encoder_wait_info *info); +/** + * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. + * note: will call dpu_encoder_helper_wait_for_irq on timeout + * @phys_enc: Pointer to physical encoder structure + * @irq_idx: IRQ index + * @func: IRQ callback to be called in case of timeout + * @wait_info: wait info struct + * @return: 0 or -ERROR + */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq_idx, void (*func)(void *arg), @@ -473,6 +523,10 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, return ret; } +/** + * dpu_encoder_get_vsync_count - get vsync count for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -480,6 +534,10 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) return phys ? atomic_read(&phys->vsync_cnt) : 0; } +/** + * dpu_encoder_get_linecount - get interface line count for the encoder. 
+ * @drm_enc: Pointer to previously created drm encoder structure + */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -495,6 +553,13 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) return linecount; } +/** + * dpu_encoder_helper_split_config - split display configuration helper function + * This helper function may be used by physical encoders to configure + * the split display related registers. + * @phys_enc: Pointer to physical encoder structure + * @interface: enum dpu_intf setting + */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface) @@ -544,6 +609,10 @@ void dpu_encoder_helper_split_config( } } +/** + * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. + * @drm_enc: Pointer to previously created drm encoder structure + */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -560,6 +629,12 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) return (num_dsc > 0) && (num_dsc > intf_count); } +/** + * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder + * This helper function is used by physical encoder to get DSC config + * used for this encoder. + * @drm_enc: Pointer to encoder structure + */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc) { struct msm_drm_private *priv = drm_enc->dev->dev_private; @@ -1089,6 +1164,11 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, return 0; } +/** + * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1106,6 +1186,11 @@ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job) { @@ -1248,6 +1333,10 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } } +/** + * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs + * @drm_enc: encoder pointer + */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1389,6 +1478,12 @@ static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catal return NULL; } +/** + * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. + */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1411,6 +1506,12 @@ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_vblank_callback"); } +/** + * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception + * @drm_enc: Pointer to drm encoder structure + * @phy_enc: Pointer to physical encoder + * Note: This is called from IRQ handler context. 
+ */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc) { @@ -1429,6 +1530,11 @@ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, DPU_ATRACE_END("encoder_underrun_callback"); } +/** + * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to + * @drm_enc: encoder pointer + * @crtc: crtc pointer + */ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1441,6 +1547,13 @@ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if + * the encoder is assigned to the given crtc + * @drm_enc: encoder pointer + * @crtc: crtc pointer + * @enable: true if vblank should be enabled + */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc, bool enable) { @@ -1465,6 +1578,13 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, } } +/** + * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys + * encoder completes last request frame + * @drm_enc: Pointer to drm encoder structure + * @ready_phys: Pointer to physical encoder + * @event: Event to process + */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event) @@ -1587,6 +1707,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) phys->ops.trigger_start(phys); } +/** + * dpu_encoder_helper_trigger_start - control start helper function + * This helper function may be optionally specified by physical + * encoders if they require ctl_start triggering. + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; @@ -1708,6 +1834,11 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); } +/** + * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous + * kickoff and trigger the ctl prepare progress for command mode display. + * @drm_enc: encoder pointer + */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1784,6 +1915,11 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, return line_time; } +/** + * dpu_encoder_vsync_time - get the time of the next vsync + * @drm_enc: encoder pointer + * @wakeup_time: pointer to ktime_t to write the vsync time to + */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) { struct drm_display_mode *mode; @@ -1930,6 +2066,13 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, dsc, dsc_common_mode, initial_lines); } +/** + * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl + * path (i.e. ctl flush and start) at next appropriate time. + * Immediately: if no previous commit is outstanding. + * Delayed: Block until next trigger can be issued. 
+ * @drm_enc: encoder pointer + */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1966,6 +2109,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); } +/** + * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. + * @drm_enc: Pointer to drm encoder structure + */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1987,6 +2134,11 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) return true; } +/** + * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path + * (i.e. ctl flush and start) immediately. + * @drm_enc: encoder pointer + */ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -2085,6 +2237,10 @@ static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc) } } +/** + * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline + * @phys_enc: Pointer to physical encoder structure + */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; @@ -2168,6 +2324,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) ctl->ops.clear_pending_flush(ctl); } +/** + * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block + * @phys_enc: Pointer to physical encoder + * @dpu_fmt: Pointer to the format description + * @output_type: HDMI/WB + */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type) @@ -2472,6 +2634,13 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = { .debugfs_init = dpu_encoder_debugfs_init, }; +/** + * dpu_encoder_init - initialize virtual encoder object + * @dev: Pointer to drm device structure + * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant + * @disp_info: Pointer to display information structure + * Returns: Pointer to newly created drm encoder + */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info) @@ -2593,6 +2762,10 @@ int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc) return ret; } +/** + * dpu_encoder_get_intf_mode - get interface mode of the given encoder + * @encoder: Pointer to drm encoder object + */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) { struct dpu_encoder_virt *dpu_enc = NULL; @@ -2612,6 +2785,12 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) return INTF_MODE_NONE; } +/** + * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder + * This helper function is used by physical encoder to get DSC blocks mask + * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure + */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) { struct drm_encoder *encoder = phys_enc->parent; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index f7465a1774aa..92b5ee390788 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -19,6 +19,8 @@ #define IDLE_TIMEOUT (66 - 16/2) +#define MAX_H_TILES_PER_DISPLAY 2 + /** * struct msm_display_info - defines display properties * @intf_type: INTF_ type @@ -36,159 +38,54 @@ struct msm_display_info { enum dpu_vsync_source vsync_source; }; -/** - * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to - * @encoder: encoder pointer - * @crtc: crtc pointer - */ void dpu_encoder_assign_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc); -/** - * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if - * the encoder is assigned to the given crtc - * @encoder: encoder pointer - * @crtc: crtc pointer - * @enable: true if vblank should be enabled - */ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder, struct drm_crtc *crtc, bool enable); -/** - * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl - * path (i.e. ctl flush and start) at next appropriate time. - * Immediately: if no previous commit is outstanding. - * Delayed: Block until next trigger can be issued. - * @encoder: encoder pointer - */ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous - * kickoff and trigger the ctl prepare progress for command mode display. - * @encoder: encoder pointer - */ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder); -/** - * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path - * (i.e. ctl flush and start) immediately. - * @encoder: encoder pointer - */ void dpu_encoder_kickoff(struct drm_encoder *encoder); -/** - * dpu_encoder_wakeup_time - get the time of the next vsync - */ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time); int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder); int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder); -/* - * dpu_encoder_get_intf_mode - get interface mode of the given encoder - * @encoder: Pointer to drm encoder object - */ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder); -/** - * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs - * @encoder: encoder pointer - */ void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder); -/** - * dpu_encoder_init - initialize virtual encoder object - * @dev: Pointer to drm device structure - * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant - * @disp_info: Pointer to display information structure - * Returns: Pointer to newly created drm encoder - */ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, int drm_enc_mode, struct msm_display_info *disp_info); -/** - * dpu_encoder_set_idle_timeout - set the idle timeout for video - * and command mode encoders. - * @drm_enc: Pointer to previously created drm encoder structure - * @idle_timeout: idle timeout duration in milliseconds - */ -void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, - u32 idle_timeout); -/** - * dpu_encoder_get_linecount - get interface line count for the encoder. 
- * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_vsync_count - get vsync count for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled - * for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained - * in virtual encoder that can collect CRC values - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: Number of physical encoders for given drm encoder - */ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc); -/** - * dpu_encoder_setup_misr - enable misr calculations - * @drm_enc: Pointer to previously created drm encoder structure - */ void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder); -/** - * dpu_encoder_get_crc - get the crc value from interface blocks - * @drm_enc: Pointer to previously created drm encoder structure - * Returns: 0 on success, error otherwise - */ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos); -/** - * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. - * @drm_enc: Pointer to previously created drm encoder structure - */ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc); -/** - * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. - * @drm_enc: Pointer to previously created drm encoder structure - * @job: Pointer to the current drm writeback job - */ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, struct drm_writeback_job *job); -/** - * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit. 
- * @drm_enc: Pointer to drm encoder structure - */ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index e77ebe3a68da..63f09857025c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -279,37 +279,15 @@ struct dpu_encoder_wait_info { s64 timeout_ms; }; -/** - * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder - * @dev: Corresponding device for devres management - * @p: Pointer to init params structure - * Return: Error code or newly allocated encoder - */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_phys_wb_init - initialize writeback encoder - * @dev: Corresponding device for devres management - * @init: Pointer to init info structure with initialization params - */ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p); -/** - * dpu_encoder_helper_trigger_start - control start helper function - * This helper function may be optionally specified by physical - * encoders if they require ctl_start triggering. - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc); static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( @@ -331,106 +309,38 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( return BLEND_3D_NONE; } -/** - * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder - * This helper function is used by physical encoder to get DSC blocks mask - * used for this encoder. - * @phys_enc: Pointer to physical encoder structure - */ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder - * This helper function is used by physical encoder to get DSC config - * used for this encoder. - * @drm_enc: Pointer to encoder structure - */ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc); -/** - * dpu_encoder_get_drm_fmt - return DRM fourcc format - * @phys_enc: Pointer to physical encoder structure - */ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_needs_periph_flush - return true if physical encoder requires - * peripheral flush - * @phys_enc: Pointer to physical encoder structure - */ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_split_config - split display configuration helper function - * This helper function may be used by physical encoders to configure - * the split display related registers. 
- * @phys_enc: Pointer to physical encoder structure - * @interface: enum dpu_intf setting - */ void dpu_encoder_helper_split_config( struct dpu_encoder_phys *phys_enc, enum dpu_intf interface); -/** - * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has - * timed out, including reporting frame error event to crtc and debug dump - * @phys_enc: Pointer to physical encoder structure - * @intr_idx: Failing interrupt index - */ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, enum dpu_intr_idx intr_idx); -/** - * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. - * note: will call dpu_encoder_helper_wait_for_irq on timeout - * @phys_enc: Pointer to physical encoder structure - * @irq: IRQ index - * @func: IRQ callback to be called in case of timeout - * @wait_info: wait info struct - * @Return: 0 or -ERROR - */ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsigned int irq, void (*func)(void *arg), struct dpu_encoder_wait_info *wait_info); -/** - * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline - * @phys_enc: Pointer to physical encoder structure - */ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); -/** - * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block - * @phys_enc: Pointer to physical encoder - * @output_type: HDMI/WB - */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, const struct msm_format *dpu_fmt, u32 output_type); -/** - * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. - */ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * Note: This is called from IRQ handler context. 
- */ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc); -/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame - * @drm_enc: Pointer to drm encoder structure - * @phys_enc: Pointer to physical encoder - * @event: Event to process - */ void dpu_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct dpu_encoder_phys *ready_phys, u32 event); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 6fc31d47cd1d..e9bbccc44dad 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -720,6 +720,12 @@ static void dpu_encoder_phys_cmd_init_ops( ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; } +/** + * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index d8a2edebfe8c..abd6600046cb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -746,6 +746,12 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; } +/** + * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder + * @dev: Corresponding device for devres management + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 07035ab77b79..4c006ec74575 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -166,10 +166,10 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc) /** * dpu_encoder_phys_wb_setup_fb - setup output framebuffer * @phys_enc: Pointer to physical encoder - * @fb: Pointer to output framebuffer + * @format: Format of the framebuffer */ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, - struct drm_framebuffer *fb) + const struct msm_format *format) { struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); struct dpu_hw_wb *hw_wb; @@ -193,12 +193,12 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, hw_wb->ops.setup_roi(hw_wb, wb_cfg); if (hw_wb->ops.setup_outformat) - hw_wb->ops.setup_outformat(hw_wb, wb_cfg); + hw_wb->ops.setup_outformat(hw_wb, wb_cfg, format); if (hw_wb->ops.setup_cdp) { const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf; - hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format, + hw_wb->ops.setup_cdp(hw_wb, format, perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable); } @@ -321,15 +321,10 @@ static void dpu_encoder_phys_wb_setup( { struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; struct drm_display_mode mode = phys_enc->cached_mode; - struct drm_framebuffer *fb = NULL; struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); 
- struct drm_writeback_job *wb_job; const struct msm_format *format; - const struct msm_format *dpu_fmt; - wb_job = wb_enc->wb_job; format = msm_framebuffer_format(wb_enc->wb_job->fb); - dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier); DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n", hw_wb->idx - WB_0, mode.name, @@ -341,9 +336,9 @@ static void dpu_encoder_phys_wb_setup( dpu_encoder_phys_wb_set_qos(phys_enc); - dpu_encoder_phys_wb_setup_fb(phys_enc, fb); + dpu_encoder_phys_wb_setup_fb(phys_enc, format); - dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB); + dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB); dpu_encoder_phys_wb_setup_ctl(phys_enc); } @@ -587,26 +582,20 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc format = msm_framebuffer_format(job->fb); - wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base, - format->pixel_format, job->fb->modifier); - if (!wb_cfg->dest.format) { - /* this error should be detected during atomic_check */ - DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format); - return; - } - - ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest); + ret = dpu_format_populate_plane_sizes(job->fb, &wb_cfg->dest); if (ret) { - DPU_DEBUG("failed to populate layout %d\n", ret); + DPU_DEBUG("failed to populate plane sizes: %d\n", ret); return; } + dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest); + wb_cfg->dest.width = job->fb->width; wb_cfg->dest.height = job->fb->height; - wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; + wb_cfg->dest.num_planes = format->num_planes; - if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) && - (wb_cfg->dest.format->element[0] == C1_B_Cb)) + if ((format->fetch_type == MDP_PLANE_PLANAR) && + (format->element[0] == C1_B_Cb)) swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]); DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 6b1e9a617da3..59c9427da7dd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -13,9 +13,6 @@ #define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096 -#define DPU_MAX_IMG_WIDTH 0x3FFF -#define DPU_MAX_IMG_HEIGHT 0x3FFF - /* * struct dpu_media_color_map - maps drm format to media format * @format: DRM base pixel format @@ -93,10 +90,9 @@ static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt) return color_fmt; } -static int _dpu_format_get_plane_sizes_ubwc( +static int _dpu_format_populate_plane_sizes_ubwc( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, + struct drm_framebuffer *fb, struct dpu_hw_fmt_layout *layout) { int i; @@ -104,9 +100,8 @@ static int _dpu_format_get_plane_sizes_ubwc( bool meta = MSM_FORMAT_IS_UBWC(fmt); memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; color = _dpu_format_get_media_color_ubwc(fmt); @@ -116,19 +111,19 @@ return -EINVAL; } - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { uint32_t y_sclines, uv_sclines; uint32_t y_meta_scanlines = 0; uint32_t uv_meta_scanlines = 0; layout->num_planes = 2; - layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width); - y_sclines =
VENUS_Y_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_Y_STRIDE(color, fb->width); + y_sclines = VENUS_Y_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width); - uv_sclines = VENUS_UV_SCANLINES(color, height); + layout->plane_pitch[1] = VENUS_UV_STRIDE(color, fb->width); + uv_sclines = VENUS_UV_SCANLINES(color, fb->height); layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] * uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -136,13 +131,13 @@ static int _dpu_format_get_plane_sizes_ubwc( goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width); - y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, fb->width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); - layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width); - uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height); + layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, fb->width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, fb->height); layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] * uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); @@ -151,16 +146,16 @@ static int _dpu_format_get_plane_sizes_ubwc( layout->num_planes = 1; - layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width); - rgb_scanlines = VENUS_RGB_SCANLINES(color, height); + layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, fb->width); + rgb_scanlines = VENUS_RGB_SCANLINES(color, fb->height); layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); if (!meta) goto done; layout->num_planes += 2; - layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width); - rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height); + layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, fb->width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, fb->height); layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); } @@ -172,26 +167,23 @@ done: return 0; } -static int _dpu_format_get_plane_sizes_linear( +static int _dpu_format_populate_plane_sizes_linear( const struct msm_format *fmt, - const uint32_t width, - const uint32_t height, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { int i; memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); - layout->format = fmt; - layout->width = width; - layout->height = height; + layout->width = fb->width; + layout->height = fb->height; layout->num_planes = fmt->num_planes; /* Due to memset above, only need to set planes of interest */ if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) { layout->num_planes = 1; - layout->plane_size[0] = width * height * layout->format->bpp; - layout->plane_pitch[0] = width * layout->format->bpp; + layout->plane_size[0] = fb->width * fb->height * fmt->bpp; + layout->plane_pitch[0] = fb->width * fmt->bpp; } else { uint32_t v_subsample, h_subsample; uint32_t chroma_samp; @@ -201,7 +193,7 @@ static int _dpu_format_get_plane_sizes_linear( _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample, &h_subsample); - if (width % h_subsample || height % v_subsample) { + if (fb->width % h_subsample || fb->height % 
v_subsample) { DRM_ERROR("mismatch in subsample vs dimensions\n"); return -EINVAL; } @@ -209,11 +201,11 @@ static int _dpu_format_get_plane_sizes_linear( if ((fmt->pixel_format == DRM_FORMAT_NV12) && (MSM_FORMAT_IS_DX(fmt))) bpp = 2; - layout->plane_pitch[0] = width * bpp; + layout->plane_pitch[0] = fb->width * bpp; layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; - layout->plane_size[0] = layout->plane_pitch[0] * height; + layout->plane_size[0] = layout->plane_pitch[0] * fb->height; layout->plane_size[1] = layout->plane_pitch[1] * - (height / v_subsample); + (fb->height / v_subsample); if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) { layout->num_planes = 2; @@ -234,8 +226,13 @@ static int _dpu_format_get_plane_sizes_linear( * all the components based on ubwc specifications. */ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) { - if (pitches && layout->plane_pitch[i] < pitches[i]) - layout->plane_pitch[i] = pitches[i]; + if (layout->plane_pitch[i] <= fb->pitches[i]) { + layout->plane_pitch[i] = fb->pitches[i]; + } else { + DRM_DEBUG("plane %u expected pitch %u, fb %u\n", + i, layout->plane_pitch[i], fb->pitches[i]); + return -EINVAL; + } } for (i = 0; i < DPU_MAX_PLANES; i++) @@ -244,53 +241,54 @@ return 0; } -static int dpu_format_get_plane_sizes( - const struct msm_format *fmt, - const uint32_t w, - const uint32_t h, - struct dpu_hw_fmt_layout *layout, - const uint32_t *pitches) +/** + * dpu_format_populate_plane_sizes - populate non-address part of the layout based on + * fb, and format found in the fb + * @fb: framebuffer pointer + * @layout: format layout structure to populate + * + * Return: error code on failure or 0 if the layout was populated + */ +int dpu_format_populate_plane_sizes( + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { - if (!layout || !fmt) { + const struct msm_format *fmt; + + if (!layout || !fb) { DRM_ERROR("invalid pointer\n"); return -EINVAL; } - if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) { + if (fb->width > DPU_MAX_IMG_WIDTH || + fb->height > DPU_MAX_IMG_HEIGHT) { DRM_ERROR("image dimensions outside max range\n"); return -ERANGE; } + fmt = msm_framebuffer_format(fb); + if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt)) - return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); + return _dpu_format_populate_plane_sizes_ubwc(fmt, fb, layout); - return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); + return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout); } -static int _dpu_format_populate_addrs_ubwc( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { + const struct msm_format *fmt; uint32_t base_addr = 0; bool meta; - if (!fb || !layout) { - DRM_ERROR("invalid pointers\n"); - return -EINVAL; - } - - if (aspace) - base_addr = msm_framebuffer_iova(fb, aspace, 0); - if (!base_addr) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } + base_addr = msm_framebuffer_iova(fb, aspace, 0); - meta = MSM_FORMAT_IS_UBWC(layout->format); + fmt = msm_framebuffer_format(fb); + meta = MSM_FORMAT_IS_UBWC(fmt); /* Per-format logic for verifying active planes */ - if (MSM_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(fmt)) { /************************************************/ /* UBWC ** */ /* buffer ** DPU
PLANE */ @@ -319,7 +317,7 @@ static int _dpu_format_populate_addrs_ubwc( + layout->plane_size[2] + layout->plane_size[3]; if (!meta) - return 0; + return; /* configure Y metadata plane */ layout->plane_addr[2] = base_addr; @@ -350,119 +348,43 @@ static int _dpu_format_populate_addrs_ubwc( layout->plane_addr[1] = 0; if (!meta) - return 0; + return; layout->plane_addr[2] = base_addr; layout->plane_addr[3] = 0; } - return 0; } -static int _dpu_format_populate_addrs_linear( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { unsigned int i; - /* Can now check the pitches given vs pitches expected */ - for (i = 0; i < layout->num_planes; ++i) { - if (layout->plane_pitch[i] > fb->pitches[i]) { - DRM_ERROR("plane %u expected pitch %u, fb %u\n", - i, layout->plane_pitch[i], fb->pitches[i]); - return -EINVAL; - } - } - /* Populate addresses for simple formats here */ - for (i = 0; i < layout->num_planes; ++i) { - if (aspace) - layout->plane_addr[i] = - msm_framebuffer_iova(fb, aspace, i); - if (!layout->plane_addr[i]) { - DRM_ERROR("failed to retrieve base addr\n"); - return -EFAULT; - } - } - - return 0; + for (i = 0; i < layout->num_planes; ++i) + layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i); } -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, - struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *layout) +/** + * dpu_format_populate_addrs - populate buffer addresses based on + * mmu, fb, and format found in the fb + * @aspace: address space pointer + * @fb: framebuffer pointer + * @layout: format layout structure to populate + */ +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) { - int ret; - - if (!fb || !layout) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } + const struct msm_format *fmt; - if ((fb->width > DPU_MAX_IMG_WIDTH) || - (fb->height > DPU_MAX_IMG_HEIGHT)) { - DRM_ERROR("image dimensions outside max range\n"); - return -ERANGE; - } - - layout->format = msm_framebuffer_format(fb); - - /* Populate the plane sizes etc via get_format */ - ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height, - layout, fb->pitches); - if (ret) - return ret; + fmt = msm_framebuffer_format(fb); /* Populate the addresses given the fb */ - if (MSM_FORMAT_IS_UBWC(layout->format) || - MSM_FORMAT_IS_TILE(layout->format)) - ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout); + if (MSM_FORMAT_IS_UBWC(fmt) || + MSM_FORMAT_IS_TILE(fmt)) + _dpu_format_populate_addrs_ubwc(aspace, fb, layout); else - ret = _dpu_format_populate_addrs_linear(aspace, fb, layout); - - return ret; -} - -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos) -{ - const struct drm_format_info *info; - struct dpu_hw_fmt_layout layout; - uint32_t bos_total_size = 0; - int ret, i; - - if (!fmt || !cmd || !bos) { - DRM_ERROR("invalid arguments\n"); - return -EINVAL; - } - - info = drm_format_info(fmt->pixel_format); - if (!info) - return -EINVAL; - - ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height, - &layout, cmd->pitches); - if (ret) - return ret; - - for (i = 0; i < info->num_planes; i++) { - if (!bos[i]) { - DRM_ERROR("invalid handle for plane %d\n", 
i); - return -EINVAL; - } - if ((i == 0) || (bos[i] != bos[0])) - bos_total_size += bos[i]->size; - } - - if (bos_total_size < layout.total_size) { - DRM_ERROR("buffers total size too small %u expected %u\n", - bos_total_size, layout.total_size); - return -EINVAL; - } - - return 0; + _dpu_format_populate_addrs_linear(aspace, fb, layout); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h index 210d0ed5f0af..c6145d43aa3f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h @@ -31,35 +31,12 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats, return false; } -/** - * dpu_format_check_modified_format - validate format and buffers for - * dpu non-standard, i.e. modified format - * @kms: kms driver - * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format - * @cmd: fb_cmd2 structure user request - * @bos: gem buffer object list - * - * Return: error code on failure, 0 on success - */ -int dpu_format_check_modified_format( - const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); +void dpu_format_populate_addrs(struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout); -/** - * dpu_format_populate_layout - populate the given format layout based on - * mmu, fb, and format found in the fb - * @aspace: address space pointer - * @fb: framebuffer pointer - * @fmtl: format layout structure to populate - * - * Return: error code on failure, -EAGAIN if success but the addresses - * are the same as before or 0 if new addresses were populated - */ -int dpu_format_populate_layout( - struct msm_gem_address_space *aspace, +int dpu_format_populate_plane_sizes( struct drm_framebuffer *fb, - struct dpu_hw_fmt_layout *fmtl); + struct dpu_hw_fmt_layout *layout); #endif /*_DPU_FORMATS_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index dcb4fd85e73b..2cbf41f33cc0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -21,6 +21,16 @@ (VIG_BASE_MASK | \ BIT(DPU_SSPP_CSC_10BIT)) +#define VIG_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS) |\ + BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + +#define VIG_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_QSEED2) |\ + BIT(DPU_SSPP_CSC)) + #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) @@ -32,6 +42,12 @@ #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) +#define DMA_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define DMA_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_CDP)) + #define DMA_MSM8998_MASK \ (BIT(DPU_SSPP_QOS) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ @@ -57,9 +73,19 @@ #define DMA_CURSOR_SDM845_MASK_SDMA \ (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) +#define DMA_CURSOR_MSM8996_MASK \ + (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR)) + #define DMA_CURSOR_MSM8998_MASK \ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR)) +#define RGB_MSM8953_MASK \ + (BIT(DPU_SSPP_QOS)) + +#define RGB_MSM8996_MASK \ + (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_RGB)) + #define MIXER_MSM8998_MASK \ (BIT(DPU_MIXER_SOURCESPLIT)) @@ -69,6 +95,12 @@ #define MIXER_QCM2290_MASK \ (BIT(DPU_DIM_LAYER) | 
BIT(DPU_MIXER_COMBINED_ALPHA)) +#define PINGPONG_MSM8996_MASK \ + (BIT(DPU_PINGPONG_DSC)) + +#define PINGPONG_MSM8996_TE2_MASK \ + (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2)) + #define PINGPONG_SDM845_MASK \ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC)) @@ -115,10 +147,6 @@ #define MAX_HORZ_DECIMATION 4 #define MAX_VERT_DECIMATION 4 -#define MAX_UPSCALE_RATIO 20 -#define MAX_DOWNSCALE_RATIO 4 -#define SSPP_UNITY_SCALE 1 - #define STRCAT(X, Y) (X Y) static const uint32_t plane_formats[] = { @@ -276,8 +304,6 @@ static const u32 wb2_formats_rgb_yuv[] = { /* SSPP common configuration */ #define _VIG_SBLK(scaler_ver) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ -285,15 +311,11 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = NULL, \ } #define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \ { \ - .maxdwnscale = MAX_DOWNSCALE_RATIO, \ - .maxupscale = MAX_UPSCALE_RATIO, \ .scaler_blk = {.name = "scaler", \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ @@ -301,29 +323,40 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = rot_cfg, \ } #define _VIG_SBLK_NOSCALE() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +/* qseed2 is not supported, so disabled scaling */ +#define _VIG_SBLK_QSEED2() \ + { \ + .scaler_blk = {.name = "scaler", \ + /* no version for qseed2 */ \ + .base = 0x200, .len = 0xa0,}, \ + .csc_blk = {.name = "csc", \ + .base = 0x320, .len = 0x100,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .rotation_cfg = NULL, \ + } + +#define _RGB_SBLK() \ + { \ + .scaler_blk = {.name = "scaler", \ + .base = 0x200, .len = 0x28,}, \ + .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ } #define _DMA_SBLK() \ { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ } static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { @@ -332,6 +365,9 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { .rot_format_list = rotation_v2_formats, }; +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed2 = + _VIG_SBLK_QSEED2(); + static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = _VIG_SBLK_NOSCALE(); @@ -363,6 +399,8 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 = static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 = _VIG_SBLK(SSPP_SCALER_VER(3, 3)); +static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK(); + static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); /************************************************************* @@ -427,6 +465,15 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { 
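These catalog masks are consumed at runtime as a per-block feature bitmap; a capability test follows the same shape as the inline-rotation check in dpu_plane.c below (the wrapper name is illustrative):

static bool sketch_sspp_has_feature(const struct dpu_hw_sspp *sspp,
				    unsigned int feature_bit)
{
	return test_bit(feature_bit, &sspp->cap->features);
}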
/************************************************************* * PINGPONG sub blocks config *************************************************************/ +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = { + .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, + .version = 0x1}, +}; + +static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = { + /* No dither block */ +}; + static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, .version = 0x1}, @@ -492,6 +539,34 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { }, }; +static const struct dpu_vbif_cfg msm8996_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1040, + .default_ot_rd_limit = 32, + .default_ot_wr_limit = 16, + .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x20, + .dynamic_ot_rd_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .dynamic_ot_wr_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl), + .priority_lvl = msm8998_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl), + .priority_lvl = msm8998_nrt_pri_lvl, + }, + }, +}; + static const struct dpu_vbif_cfg msm8998_vbif[] = { { .name = "vbif_rt", .id = VBIF_RT, @@ -675,6 +750,11 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { * Hardware catalog *************************************************************/ +#include "catalog/dpu_1_7_msm8996.h" +#include "catalog/dpu_1_14_msm8937.h" +#include "catalog/dpu_1_15_msm8917.h" +#include "catalog/dpu_1_16_msm8953.h" + #include "catalog/dpu_3_0_msm8998.h" #include "catalog/dpu_3_2_sdm660.h" #include "catalog/dpu_3_3_sdm630.h" @@ -699,6 +779,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_8_0_sc8280xp.h" #include "catalog/dpu_8_1_sm8450.h" +#include "catalog/dpu_8_4_sa8775p.h" #include "catalog/dpu_9_0_sm8550.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 37e18e820a20..c701d18c3522 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -21,8 +21,8 @@ #define DPU_HW_BLK_NAME_LEN 16 -#define MAX_IMG_WIDTH 0x3fff -#define MAX_IMG_HEIGHT 0x3fff +#define DPU_MAX_IMG_WIDTH 0x3fff +#define DPU_MAX_IMG_HEIGHT 0x3fff #define CRTC_DUAL_MIXERS 2 @@ -364,21 +364,15 @@ struct dpu_caps { /** * struct dpu_sspp_sub_blks : SSPP sub-blocks * common: Pointer to common configurations shared by sub blocks - * @maxdwnscale: max downscale ratio supported(without DECIMATION) - * @maxupscale: maxupscale ratio supported * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps * @qseed_ver: qseed version * @scaler_blk: * @csc_blk: * @format_list: Pointer to list of supported formats * @num_formats: Number of supported formats - * @virt_format_list: Pointer to list of supported formats for virtual planes - * @virt_num_formats: Number of supported formats for virtual planes * @dpu_rotation_cfg: inline rotation configuration */ struct dpu_sspp_sub_blks { - u32 maxdwnscale; - u32 maxupscale; u32 max_per_pipe_bw; u32 qseed_ver; struct dpu_scaler_blk scaler_blk; @@ -386,8 +380,6 @@ struct dpu_sspp_sub_blks { const u32 *format_list; u32 num_formats; - const u32 *virt_format_list; - u32 virt_num_formats; const struct 
dpu_rotation_cfg *rotation_cfg; }; @@ -831,6 +823,10 @@ struct dpu_mdss_cfg { const struct dpu_format_extended *vig_formats; }; +extern const struct dpu_mdss_cfg dpu_msm8917_cfg; +extern const struct dpu_mdss_cfg dpu_msm8937_cfg; +extern const struct dpu_mdss_cfg dpu_msm8953_cfg; +extern const struct dpu_mdss_cfg dpu_msm8996_cfg; extern const struct dpu_mdss_cfg dpu_msm8998_cfg; extern const struct dpu_mdss_cfg dpu_sdm630_cfg; extern const struct dpu_mdss_cfg dpu_sdm660_cfg; @@ -850,6 +846,7 @@ extern const struct dpu_mdss_cfg dpu_sm8350_cfg; extern const struct dpu_mdss_cfg dpu_sc7280_cfg; extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; extern const struct dpu_mdss_cfg dpu_sm8450_cfg; +extern const struct dpu_mdss_cfg dpu_sa8775p_cfg; extern const struct dpu_mdss_cfg dpu_sm8550_cfg; extern const struct dpu_mdss_cfg dpu_sm8650_cfg; extern const struct dpu_mdss_cfg dpu_x1e80100_cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c index 55d2768a6d4d..ae1534c49ae0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -222,6 +222,14 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_ DPU_REG_WRITE(c, CDM_MUX, mux_cfg); } +/** + * dpu_hw_cdm_init - initializes the cdm hw driver object. + * should be called once before accessing every cdm. + * @dev: DRM device handle + * @cfg: CDM catalog entry for which driver object is required + * @addr : mapped register io address of MDSS + * @mdss_rev: mdss hw core revision + */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h index ec71c9886d75..6bb3476a05f8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h @@ -122,14 +122,6 @@ struct dpu_hw_cdm { struct dpu_hw_cdm_ops ops; }; -/** - * dpu_hw_cdm_init - initializes the cdm hw driver object. - * should be called once before accessing every cdm. - * @dev: DRM device handle - * @cdm: CDM catalog entry for which driver object is required - * @addr : mapped register io address of MDSS - * @mdss_rev: mdss hw core revision - */ struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, const struct dpu_cdm_cfg *cdm, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 2e50049f2f85..4893f10d6a58 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -736,6 +736,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; }; +/** + * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. + * Should be called before accessing any ctl_path register. 
+ * @dev: Corresponding device for devres management + * @cfg: ctl_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mixer_count: Number of mixers in @mixer + * @mixer: Pointer to an array of Layer Mixers defined in the catalog + */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 4401fdc0f3e4..85c6c835cc87 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -294,15 +294,6 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_ctl, base); } -/** - * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object. - * Should be called before accessing any ctl_path register. - * @dev: Corresponding device for devres management - * @cfg: ctl_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mixer_count: Number of mixers in @mixer - * @mixer: Pointer to an array of Layer Mixers defined in the catalog - */ struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev, const struct dpu_ctl_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 5e9aad1b2aa2..657200401f57 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -190,6 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk; }; +/** + * dpu_hw_dsc_init() - Initializes the DSC hw driver object. + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h index 989c88d2449b..fc171bdeca48 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h @@ -62,24 +62,10 @@ struct dpu_hw_dsc { struct dpu_hw_dsc_ops ops; }; -/** - * dpu_hw_dsc_init() - Initializes the DSC hw driver object. 
- * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); -/** - * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object - * @dev: Corresponding device for devres management - * @cfg: DSC catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Returns: Error code or allocated dpu_hw_dsc context - */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c index ba193b0376fe..b9c433567262 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c @@ -369,6 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops, ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2; } +/** + * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object + * @dev: Corresponding device for devres management + * @cfg: DSC catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Returns: Error code or allocated dpu_hw_dsc context + */ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev, const struct dpu_dsc_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index b1da88e2935f..829ca272873e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -70,6 +70,14 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c, c->ops.setup_pcc = dpu_setup_dspp_pcc; } +/** + * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. + * should be called once before accessing every DSPP. + * @dev: Corresponding device for devres management + * @cfg: DSPP catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: pointer to structure or ERR_PTR + */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h index 3b435690b6cc..45c26cd49fa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h @@ -78,14 +78,6 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_dspp, base); } -/** - * dpu_hw_dspp_init() - Initializes the DSPP hw driver object. - * should be called once before accessing every DSPP. 
- * @dev: Corresponding device for devres management - * @cfg: DSPP catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: pointer to structure or ERR_PTR - */ struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev, const struct dpu_dspp_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index b85881aab047..49bd77a755aa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -237,6 +237,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_entry->cb(irq_entry->arg); } +/** + * dpu_core_irq - core IRQ handler + * @kms: MSM KMS handle + * @return: interrupt handling status + */ irqreturn_t dpu_core_irq(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -442,6 +447,12 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) wmb(); } +/** + * dpu_core_irq_read - IRQ helper function for reading IRQ status + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: non-zero if irq detected; otherwise no irq detected + */ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -476,6 +487,12 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, return intr_status; } +/** + * dpu_hw_intr_init(): Initializes the interrupts hw object + * @dev: Corresponding device for devres management + * @addr: mapped register io address of MDP + * @m: pointer to MDSS catalog data + */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m) @@ -517,6 +534,17 @@ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, return intr; } +/** + * dpu_core_irq_register_callback - For registering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @irq_cb: IRQ callback function. + * @irq_arg: IRQ callback argument. + * @return: 0 for success registering callback, otherwise failure + * + * This function supports registration of multiple callbacks for each interrupt. + */ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), @@ -567,6 +595,15 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, return 0; } +/** + * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ + * interrupt + * @dpu_kms: DPU handle + * @irq_idx: irq index + * @return: 0 for success unregistering callback, otherwise failure + * + * This function removes a callback previously registered for the given interrupt. 
+ */ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx) { @@ -628,6 +665,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); +/** + * dpu_debugfs_core_irq_init - register core irq debugfs + * @dpu_kms: pointer to kms + * @parent: debugfs directory root + */ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent) { @@ -636,6 +678,11 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, } #endif +/** + * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_preinstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -653,6 +700,11 @@ void dpu_core_irq_preinstall(struct msm_kms *kms) } } +/** + * dpu_core_irq_uninstall - uninstall core IRQ handler + * @kms: MSM KMS handle + * @return: none + */ void dpu_core_irq_uninstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 564b750a28fe..142358a105c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -68,12 +68,6 @@ struct dpu_hw_intr { struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS]; }; -/** - * dpu_hw_intr_init(): Initializes the interrupts hw object - * @dev: Corresponding device for devres management - * @addr: mapped register io address of MDP - * @m: pointer to MDSS catalog data - */ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, void __iomem *addr, const struct dpu_mdss_cfg *m); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 29cb854f831a..fb1d25baa518 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -547,6 +547,14 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf, DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2); } +/** + * dpu_hw_intf_init() - Initializes the INTF driver for the passed + * interface catalog entry. + * @dev: Corresponding device for devres management + * @cfg: interface catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index fc23650dfbf0..114be272ac0a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -130,14 +130,6 @@ struct dpu_hw_intf { struct dpu_hw_intf_ops ops; }; -/** - * dpu_hw_intf_init() - Initializes the INTF driver for the passed - * interface catalog entry. 
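These register/unregister helpers are how consumers hook DPU interrupts; a hypothetical vblank consumer would look roughly like this (irq_idx lookup omitted):

static void sketch_vblank_done(void *arg)
{
	struct drm_crtc *crtc = arg;

	drm_crtc_handle_vblank(crtc);
}

static int sketch_enable_vblank(struct dpu_kms *dpu_kms, unsigned int irq_idx,
				struct drm_crtc *crtc)
{
	return dpu_core_irq_register_callback(dpu_kms, irq_idx,
					      sketch_vblank_done, crtc);
}

static void sketch_disable_vblank(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}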
- * @dev: Corresponding device for devres management - * @cfg: interface catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, const struct dpu_intf_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 1d3ccf3228c6..81b56f066519 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -158,6 +158,13 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops, ops->collect_misr = dpu_hw_lm_collect_misr; } +/** + * dpu_hw_lm_init() - Initializes the mixer hw driver object. + * should be called once before accessing every mixer. + * @dev: Corresponding device for devres management + * @cfg: mixer catalog entry for which driver object is required + * @addr: mapped register io address of MDP + */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 0a3381755249..6f60fa9b3cd7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -93,13 +93,6 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_mixer, base); } -/** - * dpu_hw_lm_init() - Initializes the mixer hw driver object. - * should be called once before accessing every mixer. - * @dev: Corresponding device for devres management - * @cfg: mixer catalog entry for which driver object is required - * @addr: mapped register io address of MDP - */ struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev, const struct dpu_lm_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index a2eff36a2224..f8806a4d317b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -293,7 +293,6 @@ enum dpu_3d_blend_mode { /** * struct dpu_hw_fmt_layout - format information of the source pixel data - * @format: pixel format parameters * @num_planes: number of planes (including meta data planes) * @width: image width * @height: image height @@ -303,7 +302,6 @@ enum dpu_3d_blend_mode { * @plane_pitch: pitch of each plane */ struct dpu_hw_fmt_layout { - const struct msm_format *format; uint32_t num_planes; uint32_t width; uint32_t height; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c index ddfa40a959cb..0b3325f9c870 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c @@ -39,6 +39,14 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c, c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; }; +/** + * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed + * merge3d catalog entry. 
+ * @dev: Corresponding device for devres management + * @cfg: Pingpong catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * Return: Error code or allocated dpu_hw_merge_3d context + */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h index c192f02ec1ab..6833c0207523 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h @@ -45,14 +45,6 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_merge_3d, base); } -/** - * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed - * merge3d catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * Return: Error code or allocated dpu_hw_merge_3d context - */ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev, const struct dpu_merge_3d_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index 2db4c6fba37a..36c0ec775b92 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -283,6 +283,15 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp) return 0; } +/** + * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed + * pingpong catalog entry. + * @dev: Corresponding device for devres management + * @cfg: Pingpong catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_pingpong context + */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index a48b69fd79a3..dd99e1c21a1e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -118,15 +118,6 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) return container_of(hw, struct dpu_hw_pingpong, base); } -/** - * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed - * pingpong catalog entry. - * @dev: Corresponding device for devres management - * @cfg: Pingpong catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_pingpong context - */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev, const struct dpu_pingpong_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 2c720f1fc1b2..32c7c8084553 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -672,6 +672,15 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms, } #endif +/** + * dpu_hw_sspp_init() - Initializes the sspp hw driver object. + * Should be called once before accessing every pipe. 
+ * @dev: Corresponding device for devres management + * @cfg: Pipe catalog entry for which driver object is required + * @addr: Mapped register io address of MDP + * @mdss_data: UBWC / MDSS configuration data + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 4a910b808687..56a0edf2a57c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -12,6 +12,8 @@ struct dpu_hw_sspp; +#define DPU_SSPP_MAX_PITCH_SIZE 0xffff + /** * Flags */ @@ -142,10 +144,12 @@ struct dpu_hw_pixel_ext { * @src_rect: src ROI, caller takes into account the different operations * such as decimation, flip etc to program this field * @dest_rect: destination ROI. + * @rotation: simplified drm rotation hint */ struct dpu_sw_pipe_cfg { struct drm_rect src_rect; struct drm_rect dst_rect; + unsigned int rotation; }; /** @@ -315,15 +319,7 @@ struct dpu_hw_sspp { }; struct dpu_kms; -/** - * dpu_hw_sspp_init() - Initializes the sspp hw driver object. - * Should be called once before accessing every pipe. - * @dev: Corresponding device for devres management - * @cfg: Pipe catalog entry for which driver object is required - * @addr: Mapped register io address of MDP - * @mdss_data: UBWC / MDSS configuration data - * @mdss_rev: dpu core's major and minor versions - */ + struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev, const struct dpu_sspp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index 0f40eea7f5e2..ad19330de61a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -284,6 +284,13 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, ops->intf_audio_select = dpu_hw_intf_audio_select; } +/** + * dpu_hw_mdptop_init - initializes the top driver for the passed config + * @dev: Corresponding device for devres management + * @cfg: MDP TOP configuration from catalog + * @addr: Mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h index f1ab9fd106e5..04efdcd21ceb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h @@ -157,18 +157,9 @@ struct dpu_hw_mdp { struct dpu_hw_mdp_ops ops; }; -/** - * dpu_hw_mdptop_init - initializes the top driver for the passed config - * @dev: Corresponding device for devres management - * @cfg: MDP TOP configuration from catalog - * @addr: Mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - */ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev, const struct dpu_mdp_cfg *cfg, void __iomem *addr, const struct dpu_mdss_version *mdss_rev); -void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); - #endif /*_DPU_HW_TOP_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index 98e34afde2d2..af76ad8a8103 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -213,6 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops, 
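The new DPU_SSPP_MAX_PITCH_SIZE bounds the stride an SSPP can fetch; the plane atomic check below rejects any layout that exceeds it. As a sketch:

static int sketch_check_pitches(const struct dpu_hw_fmt_layout *layout)
{
	unsigned int i;

	for (i = 0; i < layout->num_planes; i++)
		if (layout->plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE)
			return -E2BIG;

	return 0;
}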
ops->set_write_gather_en = dpu_hw_set_write_gather_en; } +/** + * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed + * VBIF catalog entry. + * @dev: Corresponding device for devres management + * @cfg: VBIF catalog entry for which driver object is required + * @addr: Mapped register io address of MDSS + */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h index e2b4307500e4..285121ec804c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -105,13 +105,6 @@ struct dpu_hw_vbif { struct dpu_hw_vbif_ops ops; }; -/** - * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed - * VBIF catalog entry. - * @dev: Corresponding device for devres management - * @cfg: VBIF catalog entry for which driver object is required - * @addr: Mapped register io address of MDSS - */ struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, const struct dpu_vbif_cfg *cfg, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index 93ff01c889b5..fb9f90957762 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -64,10 +64,10 @@ static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx, } static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *data) + struct dpu_hw_wb_cfg *data, + const struct msm_format *fmt) { struct dpu_hw_blk_reg_map *c = &ctx->hw; - const struct msm_format *fmt = data->dest.format; u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp; u32 write_config = 0; u32 opmode = 0; @@ -213,6 +213,14 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops, ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl; } +/** + * dpu_hw_wb_init() - Initializes the writeback hw driver object. + * @dev: Corresponding device for devres management + * @cfg: wb_path catalog entry for which driver object is required + * @addr: mapped register io address of MDP + * @mdss_rev: dpu core's major and minor versions + * Return: Error code or allocated dpu_hw_wb context + */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h index 37497473e16c..ee5e5ab786e1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h @@ -37,7 +37,8 @@ struct dpu_hw_wb_ops { struct dpu_hw_wb_cfg *wb); void (*setup_outformat)(struct dpu_hw_wb *ctx, - struct dpu_hw_wb_cfg *wb); + struct dpu_hw_wb_cfg *wb, + const struct msm_format *fmt); void (*setup_roi)(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb); @@ -74,14 +75,6 @@ struct dpu_hw_wb { struct dpu_hw_wb_ops ops; }; -/** - * dpu_hw_wb_init() - Initializes the writeback hw driver object. 
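The writeback format is now passed to setup_outformat() explicitly instead of being read from data->dest.format, matching the removal of the format pointer from struct dpu_hw_fmt_layout. A hedged sketch of the adjusted caller:

static void sketch_wb_setup(struct dpu_hw_wb *wb, struct dpu_hw_wb_cfg *cfg,
			    const struct msm_format *fmt)
{
	if (wb->ops.setup_outformat)
		wb->ops.setup_outformat(wb, cfg, fmt);
}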
- * @dev: Corresponding device for devres management - * @cfg: wb_path catalog entry for which driver object is required - * @addr: mapped register io address of MDP - * @mdss_rev: dpu core's major and minor versions - * Return: Error code or allocated dpu_hw_wb context - */ struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev, const struct dpu_wb_cfg *cfg, void __iomem *addr, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 9bcae53c4f45..ca4847b2b738 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -230,6 +230,21 @@ static int dpu_regset32_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(dpu_regset32); +/** + * dpu_debugfs_create_regset32 - Create register read back file for debugfs + * + * This function is almost identical to the standard debugfs_create_regset32() + * function, with the main difference being that a list of register + * names/offsets do not need to be provided. The 'read' function simply outputs + * sequential register values over a specified range. + * + * @name: File name within debugfs + * @mode: File mode within debugfs + * @parent: Parent directory entry within debugfs, can be NULL + * @offset: sub-block offset + * @length: sub-block length, in bytes + * @dpu_kms: pointer to dpu kms structure + */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms) @@ -1025,7 +1040,6 @@ static const struct msm_kms_funcs kms_funcs = { .complete_commit = dpu_kms_complete_commit, .enable_vblank = dpu_kms_enable_vblank, .disable_vblank = dpu_kms_disable_vblank, - .check_modified_format = dpu_format_check_modified_format, .destroy = dpu_kms_destroy, .snapshot = dpu_kms_mdp_snapshot, #ifdef CONFIG_DEBUG_FS @@ -1061,6 +1075,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return 0; } +/** + * dpu_kms_get_clk_rate() - get the clock rate + * @dpu_kms: pointer to dpu_kms structure + * @clock_name: clock name to get the rate + * + * Return: current clock rate + */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name) { struct clk *clk; @@ -1202,13 +1223,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - /* - * max crtc width is equal to the max mixer width * 2 and max height is - * is 4K - */ - dev->mode_config.max_width = - dpu_kms->catalog->caps->max_mixer_width * 2; - dev->mode_config.max_height = 4096; + dev->mode_config.max_width = DPU_MAX_IMG_WIDTH; + dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT; dev->max_vblank_count = 0xffffffff; /* Disable vblank irqs aggressively for power-saving */ @@ -1445,8 +1461,13 @@ static const struct dev_pm_ops dpu_pm_ops = { }; static const struct of_device_id dpu_dt_match[] = { + { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, }, + { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, }, + { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, }, + { .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, }, { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, + { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, }, { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, }, { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, }, { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 935ff6fd172c..88d64d43ea1a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -145,38 +145,11 @@ struct dpu_global_state * @dpu_debugfs_create_regset32: Create 32-bit register dump file */ -/** - * dpu_debugfs_create_regset32 - Create register read back file for debugfs - * - * This function is almost identical to the standard debugfs_create_regset32() - * function, with the main difference being that a list of register - * names/offsets do not need to be provided. The 'read' function simply outputs - * sequential register values over a specified range. - * - * @name: File name within debugfs - * @mode: File mode within debugfs - * @parent: Parent directory entry within debugfs, can be NULL - * @offset: sub-block offset - * @length: sub-block length, in bytes - * @dpu_kms: pointer to dpu kms structure - */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms); /** - * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs - * - * The return value should be passed as the 'parent' argument to subsequent - * debugfs create calls. - * - * @dpu_kms: Pointer to DPU's KMS structure - * - * Return: dentry pointer for DPU's debugfs location - */ -void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); - -/** * DPU info management functions * These functions/definitions allow for building up a 'dpu_info' structure * containing one or more "key=value\n" entries. @@ -189,13 +162,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -/** - * dpu_kms_get_clk_rate() - get the clock rate - * @dpu_kms: pointer to dpu_kms structure - * @clock_name: clock name to get the rate - * - * Return: current clock rate - */ unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name); #endif /* __dpu_kms_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 29298e066163..3ffac24333a2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -528,8 +528,7 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe, static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, const struct msm_format *fmt, bool color_fill, - struct dpu_sw_pipe_cfg *pipe_cfg, - unsigned int rotation) + struct dpu_sw_pipe_cfg *pipe_cfg) { struct dpu_hw_sspp *pipe_hw = pipe->sspp; const struct drm_format_info *info = drm_format_info(fmt->pixel_format); @@ -552,7 +551,7 @@ static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe, dst_height, &scaler3_cfg, fmt, info->hsub, info->vsub, - rotation); + pipe_cfg->rotation); /* configure pixel extension based on scalar config */ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext, @@ -604,7 +603,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate, if (pipe->sspp->ops.setup_rects) pipe->sspp->ops.setup_rects(pipe, &pipe_cfg); - _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg); } /** @@ -648,7 +647,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, struct drm_framebuffer *fb = new_state->fb; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate = 
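Since dpu_debugfs_create_regset32() dumps a raw register range without a per-register name table, a caller only needs a sub-block offset and length. A hypothetical debugfs hook:

static void sketch_debugfs_dump_block(struct dpu_kms *dpu_kms, void *root,
				      u32 offset, u32 len)
{
	/* read-only file printing 'len' bytes of registers from 'offset' */
	dpu_debugfs_create_regset32("blk_regs", 0400, root, offset, len,
				    dpu_kms);
}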
to_dpu_plane_state(new_state); - struct dpu_hw_fmt_layout layout; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); int ret; @@ -676,17 +674,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, } } - /* validate framebuffer layout before commit */ - ret = dpu_format_populate_layout(pstate->aspace, - new_state->fb, &layout); - if (ret) { - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - if (pstate->aspace) - msm_framebuffer_cleanup(new_state->fb, pstate->aspace, - pstate->needs_dirtyfb); - return ret; - } - return 0; } @@ -708,12 +695,17 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane, } static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, - const struct dpu_sspp_sub_blks *sblk, - struct drm_rect src, const struct msm_format *fmt) + struct dpu_sw_pipe *pipe, + struct drm_rect src, + const struct msm_format *fmt) { + const struct dpu_sspp_sub_blks *sblk = pipe->sspp->cap->sblk; size_t num_formats; const u32 *supported_formats; + if (!test_bit(DPU_SSPP_INLINE_ROTATION, &pipe->sspp->cap->features)) + return -EINVAL; + if (!sblk->rotation_cfg) { DPU_ERROR("invalid rotation cfg\n"); return -EINVAL; @@ -743,6 +735,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, { uint32_t min_src_size; struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + int ret; min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1; @@ -780,6 +773,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return -EINVAL; } + if (pipe_cfg->rotation & DRM_MODE_ROTATE_90) { + ret = dpu_plane_check_inline_rotation(pdpu, pipe, pipe_cfg->src_rect, fmt); + if (ret) + return ret; + } + /* max clk check */ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) { DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n"); @@ -789,37 +788,29 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, return 0; } -static int dpu_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +#define MAX_UPSCALE_RATIO 20 +#define MAX_DOWNSCALE_RATIO 4 + +static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + const struct drm_crtc_state *crtc_state) { - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0, min_scale; + int i, ret = 0, min_scale, max_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); - struct dpu_sw_pipe *pipe = &pstate->pipe; - struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; - const struct drm_crtc_state *crtc_state = NULL; - const struct msm_format *fmt; struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; struct drm_rect fb_rect = { 0 }; uint32_t max_linewidth; - unsigned int rotation; - uint32_t supported_rotations; - const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap; - const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk; - if (new_plane_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, - new_plane_state->crtc); + min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO); + max_scale = MAX_DOWNSCALE_RATIO << 16; - min_scale = FRAC_16_16(1, sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, - sblk->maxdwnscale << 16, + max_scale, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, 
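The fixed scaling window replaces the per-SSPP maxupscale/maxdwnscale catalog fields: drm_atomic_helper_check_plane_state() takes src/dst ratios in 16.16 fixed point, so 20x upscale bounds the minimum ratio and 4x downscale bounds the maximum. Recomputed for illustration:

/* 16.16 fixed-point scale limits as used above (illustrative only) */
#define SKETCH_FRAC_16_16(num, den)	((int)(((u64)(num) << 16) / (den)))

static void sketch_scale_limits(int *min_scale, int *max_scale)
{
	*min_scale = SKETCH_FRAC_16_16(1, 20);	/* src/dst = 1/20 -> 20x upscale */
	*max_scale = 4 << 16;			/* src/dst = 4/1 -> 4x downscale */
}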
"Check plane state failed (%d)\n", ret); @@ -828,12 +819,6 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if (!new_plane_state->visible) return 0; - pipe->multirect_index = DPU_SSPP_RECT_SOLO; - pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; - r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; - r_pipe->sspp = NULL; - pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos; if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) { DPU_ERROR("> %d plane stages assigned\n", @@ -841,13 +826,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - pipe_cfg->src_rect = new_plane_state->src; - /* state->src is 16.16, src_rect is not */ - pipe_cfg->src_rect.x1 >>= 16; - pipe_cfg->src_rect.x2 >>= 16; - pipe_cfg->src_rect.y1 >>= 16; - pipe_cfg->src_rect.y2 >>= 16; + drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src); pipe_cfg->dst_rect = new_plane_state->dst; @@ -855,14 +835,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, fb_rect.y2 = new_plane_state->fb->height; /* Ensure fb size is supported */ - if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH || - drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) { + if (drm_rect_width(&fb_rect) > DPU_MAX_IMG_WIDTH || + drm_rect_height(&fb_rect) > DPU_MAX_IMG_HEIGHT) { DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n", DRM_RECT_ARG(&fb_rect)); return -E2BIG; } - fmt = msm_framebuffer_format(new_plane_state->fb); + ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout); + if (ret) { + DPU_ERROR_PLANE(pdpu, "failed to get format plane sizes, %d\n", ret); + return ret; + } + + for (i = 0; i < pstate->layout.num_planes; i++) + if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE) + return -E2BIG; max_linewidth = pdpu->catalog->caps->max_linewidth; @@ -872,6 +860,86 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { + if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); + return -E2BIG; + } + + *r_pipe_cfg = *pipe_cfg; + pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; + pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; + r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; + r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; + } else { + memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg)); + } + + drm_rect_rotate_inv(&pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + if (r_pipe_cfg->src_rect.x1 != 0) + drm_rect_rotate_inv(&r_pipe_cfg->src_rect, + new_plane_state->fb->width, new_plane_state->fb->height, + new_plane_state->rotation); + + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + + return 0; +} + +static int dpu_plane_atomic_check_sspp(struct drm_plane *plane, + struct drm_atomic_state *state, + const struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct msm_format *fmt; + struct dpu_sw_pipe_cfg *pipe_cfg = 
&pstate->pipe_cfg; + struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; + uint32_t max_linewidth; + uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps; + const struct dpu_sspp_sub_blks *sblk; + int ret = 0; + + pipe_hw_caps = pipe->sspp->cap; + sblk = pipe->sspp->cap->sblk; + + /* + * We already have verified scaling against platform limitations. + * Now check if the SSPP supports scaling at all. + */ + if (!sblk->scaler_blk.len && + ((drm_rect_width(&new_plane_state->src) >> 16 != + drm_rect_width(&new_plane_state->dst)) || + (drm_rect_height(&new_plane_state->src) >> 16 != + drm_rect_height(&new_plane_state->dst)))) + return -ERANGE; + + fmt = msm_framebuffer_format(new_plane_state->fb); + + max_linewidth = pdpu->catalog->caps->max_linewidth; + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); + r_pipe_cfg->rotation = pipe_cfg->rotation; + + ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, + &crtc_state->adjusted_mode); + if (ret) + return ret; + + if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) { /* * In parallel multirect case only the half of the usual width * is supported for tiled formats. If we are here, we know that @@ -885,16 +953,11 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -E2BIG; } - if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) { - DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", - DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); - return -E2BIG; - } - if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) || drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) || (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) && !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) || + pipe_cfg->rotation & DRM_MODE_ROTATE_90 || MSM_FORMAT_IS_YUV(fmt)) { DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n", DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); @@ -912,51 +975,48 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, r_pipe->multirect_index = DPU_SSPP_RECT_1; r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL; - *r_pipe_cfg = *pipe_cfg; - pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1; - pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1; - r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2; - r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2; - } - - drm_rect_rotate_inv(&pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - if (r_pipe->sspp) - drm_rect_rotate_inv(&r_pipe_cfg->src_rect, - new_plane_state->fb->width, new_plane_state->fb->height, - new_plane_state->rotation); - - ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode); - if (ret) - return ret; - - if (r_pipe->sspp) { ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt, &crtc_state->adjusted_mode); if (ret) return ret; } - supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + return 0; +} - if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) - supported_rotations |= DRM_MODE_ROTATE_90; +static int dpu_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state 
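Rotation is now simplified against the SSPP capabilities and stored in the pipe configuration rather than the plane state; the supported mask mirrors the code above (helper name is illustrative):

static unsigned int sketch_simplify_rotation(unsigned int requested,
					     bool has_inline_rotation)
{
	unsigned int supported = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;

	if (has_inline_rotation)
		supported |= DRM_MODE_ROTATE_90;

	return drm_rotation_simplify(requested, supported);
}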
*new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + int ret = 0; + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + struct dpu_sw_pipe *pipe = &pstate->pipe; + struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; + const struct drm_crtc_state *crtc_state = NULL; - rotation = drm_rotation_simplify(new_plane_state->rotation, - supported_rotations); + if (new_plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); - if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) && - (rotation & DRM_MODE_ROTATE_90)) { - ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt); - if (ret) - return ret; - } + pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); + r_pipe->sspp = NULL; - pstate->rotation = rotation; - pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state); + if (ret) + return ret; - return 0; + if (!new_plane_state->visible) + return 0; + + pipe->multirect_index = DPU_SSPP_RECT_SOLO; + pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + r_pipe->multirect_index = DPU_SSPP_RECT_SOLO; + r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE; + + return dpu_plane_atomic_check_sspp(plane, state, crtc_state); } static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe) @@ -981,6 +1041,10 @@ static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe } +/** + * dpu_plane_flush - final plane operations before commit flush + * @plane: Pointer to drm plane structure + */ void dpu_plane_flush(struct drm_plane *plane) { struct dpu_plane *pdpu; @@ -1060,14 +1124,14 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, pipe_cfg); } - _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation); + _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg); if (pipe->sspp->ops.setup_multirect) pipe->sspp->ops.setup_multirect( pipe); if (pipe->sspp->ops.setup_format) { - unsigned int rotation = pstate->rotation; + unsigned int rotation = pipe_cfg->rotation; src_flags = 0x0; @@ -1101,7 +1165,8 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane, _dpu_plane_set_qos_remap(plane, pipe); } -static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) +static void dpu_plane_sspp_atomic_update(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct dpu_plane *pdpu = to_dpu_plane(plane); struct drm_plane_state *state = plane->state; @@ -1115,17 +1180,6 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) msm_framebuffer_format(fb); struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg; struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg; - struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); - struct msm_gem_address_space *aspace = kms->base.aspace; - struct dpu_hw_fmt_layout layout; - bool layout_valid = false; - int ret; - - ret = dpu_format_populate_layout(aspace, fb, &layout); - if (ret) - DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); - else - layout_valid = true; pstate->pending = true; @@ -1133,6 +1187,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe); pdpu->is_rt_pipe = is_rt_pipe; + dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout); + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u 
" DRM_RECT_FMT ", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), crtc->base.id, DRM_RECT_ARG(&state->dst), @@ -1140,12 +1196,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); if (r_pipe->sspp) { dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt, drm_mode_vrefresh(&crtc->mode), - layout_valid ? &layout : NULL); + &pstate->layout); } if (pstate->needs_qos_remap) @@ -1197,7 +1253,7 @@ static void dpu_plane_atomic_update(struct drm_plane *plane, if (!new_state->visible) { _dpu_plane_atomic_disable(plane); } else { - dpu_plane_sspp_atomic_update(plane); + dpu_plane_sspp_atomic_update(plane, new_state); } } @@ -1301,7 +1357,6 @@ static void dpu_plane_reset(struct drm_plane *plane) { struct dpu_plane *pdpu; struct dpu_plane_state *pstate; - struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); if (!plane) { DPU_ERROR("invalid plane\n"); @@ -1323,16 +1378,6 @@ static void dpu_plane_reset(struct drm_plane *plane) return; } - /* - * Set the SSPP here until we have proper virtualized DPU planes. - * This is the place where the state is allocated, so fill it fully. - */ - pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe); - pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO; - pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE; - - pstate->r_pipe.sspp = NULL; - __drm_atomic_helper_plane_reset(plane, &pstate->base); } @@ -1388,7 +1433,15 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { .atomic_update = dpu_plane_atomic_update, }; -/* initialize plane */ +/** + * dpu_plane_init - create new dpu plane for the given pipe + * @dev: Pointer to DRM device + * @pipe: dpu hardware pipe identifier + * @type: Plane type - PRIMARY/OVERLAY/CURSOR + * @possible_crtcs: bitmask of crtc that can be attached to the given pipe + * + * Initialize the plane. 
+ */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index abd6b21a049b..97090ca7842b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -30,7 +30,7 @@ * @plane_fetch_bw: calculated BW per plane * @plane_clk: calculated clk per plane * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed - * @rotation: simplified drm rotation hint + * @layout: framebuffer memory layout */ struct dpu_plane_state { struct drm_plane_state base; @@ -47,46 +47,21 @@ struct dpu_plane_state { u64 plane_clk; bool needs_dirtyfb; - unsigned int rotation; + + struct dpu_hw_fmt_layout layout; }; #define to_dpu_plane_state(x) \ container_of(x, struct dpu_plane_state, base) -/** - * dpu_plane_flush - final plane operations before commit flush - * @plane: Pointer to drm plane structure - */ void dpu_plane_flush(struct drm_plane *plane); -/** - * dpu_plane_set_error: enable/disable error condition - * @plane: pointer to drm_plane structure - */ void dpu_plane_set_error(struct drm_plane *plane, bool error); -/** - * dpu_plane_init - create new dpu plane for the given pipe - * @dev: Pointer to DRM device - * @pipe: dpu hardware pipe identifier - * @type: Plane type - PRIMARY/OVERLAY/CURSOR - * @possible_crtcs: bitmask of crtc that can be attached to the given pipe - * - */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, unsigned long possible_crtcs); -/** - * dpu_plane_color_fill - enables color fill on plane - * @plane: Pointer to DRM plane object - * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red - * @alpha: 8-bit fill alpha value, 255 selects 100% alpha - * Returns: 0 on success - */ -int dpu_plane_color_fill(struct drm_plane *plane, - uint32_t color, uint32_t alpha); - #ifdef CONFIG_DEBUG_FS void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable); #else diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 44938ba7a2b7..c247af03dc8e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -34,6 +34,16 @@ struct dpu_rm_requirements { struct msm_display_topology topology; }; +/** + * dpu_rm_init - Read hardware catalog and create reservation tracking objects + * for all HW blocks. + * @dev: Corresponding device for devres management + * @rm: DPU Resource Manager handle + * @cat: Pointer to hardware catalog + * @mdss_data: Pointer to MDSS / UBWC configuration + * @mmio: mapped register io address of MDP + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, @@ -641,6 +651,13 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, } } +/** + * dpu_rm_release - Given the encoder for the display chain, release any + * HW blocks previously reserved for that use case. 
+ * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @return: 0 on Success otherwise -ERROR + */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc) { @@ -657,6 +674,20 @@ void dpu_rm_release(struct dpu_global_state *global_state, _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); } +/** + * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze + * the use connections and user requirements, specified through related + * topology control properties, and reserve hardware blocks to that + * display chain. + * HW blocks can then be accessed through dpu_rm_get_* functions. + * HW Reservations should be released via dpu_rm_release_hw. + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc: DRM Encoder handle + * @crtc_state: Proposed Atomic DRM CRTC State handle + * @topology: Pointer to topology info for the display + * @return: 0 on Success otherwise -ERROR + */ int dpu_rm_reserve( struct dpu_rm *rm, struct dpu_global_state *global_state, @@ -694,6 +725,16 @@ int dpu_rm_reserve( return ret; } +/** + * dpu_rm_get_assigned_resources - Get hw resources of the given type that are + * assigned to this encoder + * @rm: DPU Resource Manager handle + * @global_state: resources shared across multiple kms objects + * @enc_id: encoder id requesting for allocation + * @type: resource type to return data for + * @blks: pointer to the array to be filled by HW resources + * @blks_size: size of the @blks array + */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) @@ -772,6 +813,11 @@ static void dpu_rm_print_state_helper(struct drm_printer *p, } +/** + * dpu_rm_print_state - output the RM private state + * @p: DRM printer + * @global_state: global state + */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index e63db8ace6b9..ea0e49cb7b0d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -38,62 +38,40 @@ struct dpu_rm { }; /** - * dpu_rm_init - Read hardware catalog and create reservation tracking objects - * for all HW blocks. 
- * @dev: Corresponding device for devres management - * @rm: DPU Resource Manager handle - * @cat: Pointer to hardware catalog - * @mdss_data: Pointer to MDSS / UBWC configuration - * @mmio: mapped register io address of MDP - * @Return: 0 on Success otherwise -ERROR + * struct msm_display_topology - defines a display topology pipeline + * @num_lm: number of layer mixers used + * @num_intf: number of interfaces the panel is mounted on + * @num_dspp: number of dspp blocks used + * @num_dsc: number of Display Stream Compression (DSC) blocks used + * @needs_cdm: indicates whether cdm block is needed for this display topology */ +struct msm_display_topology { + u32 num_lm; + u32 num_intf; + u32 num_dspp; + u32 num_dsc; + bool needs_cdm; +}; + int dpu_rm_init(struct drm_device *dev, struct dpu_rm *rm, const struct dpu_mdss_cfg *cat, const struct msm_mdss_data *mdss_data, void __iomem *mmio); -/** - * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze - * the use connections and user requirements, specified through related - * topology control properties, and reserve hardware blocks to that - * display chain. - * HW blocks can then be accessed through dpu_rm_get_* functions. - * HW Reservations should be released via dpu_rm_release_hw. - * @rm: DPU Resource Manager handle - * @drm_enc: DRM Encoder handle - * @crtc_state: Proposed Atomic DRM CRTC State handle - * @topology: Pointer to topology info for the display - * @Return: 0 on Success otherwise -ERROR - */ int dpu_rm_reserve(struct dpu_rm *rm, struct dpu_global_state *global_state, struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, struct msm_display_topology topology); -/** - * dpu_rm_reserve - Given the encoder for the display chain, release any - * HW blocks previously reserved for that use case. - * @rm: DPU Resource Manager handle - * @enc: DRM Encoder handle - * @Return: 0 on Success otherwise -ERROR - */ void dpu_rm_release(struct dpu_global_state *global_state, struct drm_encoder *enc); -/** - * Get hw resources of the given type that are assigned to this encoder. 
- */ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); -/** - * dpu_rm_print_state - output the RM private state - * @p: DRM printer - * @global_state: global state - */ void dpu_rm_print_state(struct drm_printer *p, const struct dpu_global_state *global_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 47c02b98eac3..2a551e455aa3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -204,6 +204,11 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, vbif->ops.set_halt_ctrl(vbif, params->xin_id, false); } +/** + * dpu_vbif_set_qos_remap - set QoS priority level remap + * @dpu_kms: DPU handler + * @params: Pointer to QoS configuration parameters + */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { @@ -245,6 +250,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } } +/** + * dpu_vbif_clear_errors - clear any vbif errors + * @dpu_kms: DPU handler + */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; @@ -262,6 +271,10 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) } } +/** + * dpu_vbif_init_memtypes - initialize xin memory types for vbif + * @dpu_kms: DPU handler + */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) { struct dpu_hw_vbif *vbif; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h index e1b1f7f4e4be..62e47ae1e3ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h @@ -38,32 +38,14 @@ struct dpu_vbif_set_qos_params { bool is_rt; }; -/** - * dpu_vbif_set_ot_limit - set OT limit for vbif client - * @dpu_kms: DPU handler - * @params: Pointer to OT configuration parameters - */ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params); -/** - * dpu_vbif_set_qos_remap - set QoS priority level remap - * @dpu_kms: DPU handler - * @params: Pointer to QoS configuration parameters - */ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params); -/** - * dpu_vbif_clear_errors - clear any vbif errors - * @dpu_kms: DPU handler - */ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms); -/** - * dpu_vbif_init_memtypes - initialize xin memory types for vbif - * @dpu_kms: DPU handler - */ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms); void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index 4d55e3cf570f..07a2c1e87219 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -25,24 +25,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b addr = base_addr; end_addr = base_addr + aligned_len; - if (!(*reg)) - *reg = kvzalloc(len_padded, GFP_KERNEL); - - if (*reg) - dump_addr = *reg; + *reg = kvzalloc(len_padded, GFP_KERNEL); + if (!*reg) + return; + dump_addr = *reg; for (i = 0; i < num_rows; i++) { x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; xc = (addr + 0xc < end_addr) ? 
readl_relaxed(addr + 0xc) : 0; - if (dump_addr) { - dump_addr[i * 4] = x0; - dump_addr[i * 4 + 1] = x4; - dump_addr[i * 4 + 2] = x8; - dump_addr[i * 4 + 3] = xc; - } + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; addr += REG_DUMP_ALIGN; } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index a599fc5d63c5..74e01a5dd419 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -17,281 +17,281 @@ #include "dp_display.h" #include "dp_utils.h" -struct dp_audio_private { +struct msm_dp_audio_private { struct platform_device *audio_pdev; struct platform_device *pdev; struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; u32 channels; - struct dp_audio dp_audio; + struct msm_dp_audio msm_dp_audio; }; -static u32 dp_audio_get_header(struct dp_catalog *catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - return dp_catalog_audio_get_header(catalog, sdp, header); + return msm_dp_catalog_audio_get_header(catalog, sdp, header); } -static void dp_audio_set_header(struct dp_catalog *catalog, +static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog, u32 data, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - dp_catalog_audio_set_header(catalog, sdp, header, data); + msm_dp_catalog_audio_set_header(catalog, sdp, header, data); } -static void dp_audio_stream_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); new_value = 0x02; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); new_value = value; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); new_value = audio->channels - 1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << 
HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); new_value = 0x1; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); new_value = 0x17; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); new_value = 0x84; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); new_value = 0x1b; - parity_byte = dp_utils_calculate_parity(new_value); + 
parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); new_value = (0x0 | (0x11 << 2)); - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", new_value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); new_value = 0x05; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); /* Config header and parity byte 3 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); new_value = 0x0; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_3_BIT) | (parity_byte << PARITY_BYTE_3_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); } -static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 value, new_value; u8 parity_byte; /* Config header and parity byte 1 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, 
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); new_value = 0x06; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_1_BIT) | (parity_byte << PARITY_BYTE_1_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); /* Config header and parity byte 2 */ - value = dp_audio_get_header(catalog, + value = msm_dp_audio_get_header(catalog, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); new_value = 0x0F; - parity_byte = dp_utils_calculate_parity(new_value); + parity_byte = msm_dp_utils_calculate_parity(new_value); value |= ((new_value << HEADER_BYTE_2_BIT) | (parity_byte << PARITY_BYTE_2_BIT)); drm_dbg_dp(audio->drm_dev, "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", value, parity_byte); - dp_audio_set_header(catalog, value, + msm_dp_audio_set_header(catalog, value, DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); } -static void dp_audio_setup_sdp(struct dp_audio_private *audio) +static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio) { - dp_catalog_audio_config_sdp(audio->catalog); + msm_dp_catalog_audio_config_sdp(audio->catalog); - dp_audio_stream_sdp(audio); - dp_audio_timestamp_sdp(audio); - dp_audio_infoframe_sdp(audio); - dp_audio_copy_management_sdp(audio); - dp_audio_isrc_sdp(audio); + msm_dp_audio_stream_sdp(audio); + msm_dp_audio_timestamp_sdp(audio); + msm_dp_audio_infoframe_sdp(audio); + msm_dp_audio_copy_management_sdp(audio); + msm_dp_audio_isrc_sdp(audio); } -static void dp_audio_setup_acr(struct dp_audio_private *audio) +static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio) { u32 select = 0; - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - switch (audio->dp_audio.bw_code) { + switch (audio->msm_dp_audio.bw_code) { case DP_LINK_BW_1_62: select = 0; break; @@ -310,15 +310,15 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio) break; } - dp_catalog_audio_config_acr(catalog, select); + msm_dp_catalog_audio_config_acr(catalog, select); } -static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; u32 safe_to_exit_level = 0; - switch (audio->dp_audio.lane_count) { + switch (audio->msm_dp_audio.lane_count) { case 1: safe_to_exit_level = 14; break; @@ -336,49 +336,49 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) break; } - dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); + msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); } -static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable) { - struct dp_catalog *catalog = audio->catalog; + struct msm_dp_catalog *catalog = audio->catalog; - dp_catalog_audio_enable(catalog, enable); + msm_dp_catalog_audio_enable(catalog, enable); } -static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev) { - struct dp_audio *dp_audio; - struct msm_dp *dp_display; + struct msm_dp_audio *msm_dp_audio; + struct msm_dp *msm_dp_display; if (!pdev) { DRM_ERROR("invalid 
input\n"); return ERR_PTR(-ENODEV); } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return ERR_PTR(-ENODEV); } - dp_audio = dp_display->dp_audio; + msm_dp_audio = msm_dp_display->msm_dp_audio; - if (!dp_audio) { - DRM_ERROR("invalid dp_audio data\n"); + if (!msm_dp_audio) { + DRM_ERROR("invalid msm_dp_audio data\n"); return ERR_PTR(-EINVAL); } - return container_of(dp_audio, struct dp_audio_private, dp_audio); + return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); } -static int dp_audio_hook_plugged_cb(struct device *dev, void *data, +static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); if (!pdev) { @@ -386,20 +386,20 @@ static int dp_audio_hook_plugged_cb(struct device *dev, void *data, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { pr_err("invalid input\n"); return -ENODEV; } - return dp_display_set_plugged_cb(dp_display, fn, codec_dev); + return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev); } -static int dp_audio_get_eld(struct device *dev, +static int msm_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); @@ -408,30 +408,30 @@ static int dp_audio_get_eld(struct device *dev, return -ENODEV; } - dp_display = platform_get_drvdata(pdev); - if (!dp_display) { + msm_dp_display = platform_get_drvdata(pdev); + if (!msm_dp_display) { DRM_ERROR("invalid input\n"); return -ENODEV; } - memcpy(buf, dp_display->connector->eld, - min(sizeof(dp_display->connector->eld), len)); + memcpy(buf, msm_dp_display->connector->eld, + min(sizeof(msm_dp_display->connector->eld), len)); return 0; } -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { int rc = 0; - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); + msm_dp_display = platform_get_drvdata(pdev); /* * there could be cases where sound card can be opened even @@ -441,12 +441,12 @@ int dp_audio_hw_params(struct device *dev, * such cases check for connection status and bail out if not * connected. 
*/ - if (!dp_display->power_on) { + if (!msm_dp_display->power_on) { rc = -EINVAL; goto end; } - audio = dp_audio_get_data(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { rc = PTR_ERR(audio); goto end; @@ -454,26 +454,26 @@ int dp_audio_hw_params(struct device *dev, audio->channels = params->channels; - dp_audio_setup_sdp(audio); - dp_audio_setup_acr(audio); - dp_audio_safe_to_exit_level(audio); - dp_audio_enable(audio, true); - dp_display_signal_audio_start(dp_display); - dp_display->audio_enabled = true; + msm_dp_audio_setup_sdp(audio); + msm_dp_audio_setup_acr(audio); + msm_dp_audio_safe_to_exit_level(audio); + msm_dp_audio_enable(audio, true); + msm_dp_display_signal_audio_start(msm_dp_display); + msm_dp_display->audio_enabled = true; end: return rc; } -static void dp_audio_shutdown(struct device *dev, void *data) +static void msm_dp_audio_shutdown(struct device *dev, void *data) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; struct platform_device *pdev; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; pdev = to_platform_device(dev); - dp_display = platform_get_drvdata(pdev); - audio = dp_audio_get_data(pdev); + msm_dp_display = platform_get_drvdata(pdev); + audio = msm_dp_audio_get_data(pdev); if (IS_ERR(audio)) { DRM_ERROR("failed to get audio data\n"); return; @@ -487,32 +487,32 @@ static void dp_audio_shutdown(struct device *dev, void *data) * connected. is_connected cannot be used here as its set * to false earlier than this call */ - if (!dp_display->audio_enabled) + if (!msm_dp_display->audio_enabled) return; - dp_audio_enable(audio, false); + msm_dp_audio_enable(audio, false); /* signal the dp display to safely shutdown clocks */ - dp_display_signal_audio_complete(dp_display); + msm_dp_display_signal_audio_complete(msm_dp_display); } -static const struct hdmi_codec_ops dp_audio_codec_ops = { - .hw_params = dp_audio_hw_params, - .audio_shutdown = dp_audio_shutdown, - .get_eld = dp_audio_get_eld, - .hook_plugged_cb = dp_audio_hook_plugged_cb, +static const struct hdmi_codec_ops msm_dp_audio_codec_ops = { + .hw_params = msm_dp_audio_hw_params, + .audio_shutdown = msm_dp_audio_shutdown, + .get_eld = msm_dp_audio_get_eld, + .hook_plugged_cb = msm_dp_audio_hook_plugged_cb, }; static struct hdmi_codec_pdata codec_data = { - .ops = &dp_audio_codec_ops, + .ops = &msm_dp_audio_codec_ops, .max_i2s_channels = 8, .i2s = 1, }; -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); if (audio_priv->audio_pdev) { platform_device_unregister(audio_priv->audio_pdev); @@ -520,13 +520,13 @@ void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) } } -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio) +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio_priv; + struct msm_dp_audio_private *audio_priv; - audio_priv = container_of(dp_audio, - struct dp_audio_private, dp_audio); + audio_priv = container_of(msm_dp_audio, + struct msm_dp_audio_private, msm_dp_audio); audio_priv->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, @@ -536,13 +536,13 @@ int 
dp_register_audio_driver(struct device *dev, return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); } -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog) +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog) { int rc = 0; - struct dp_audio_private *audio; - struct dp_audio *dp_audio; + struct msm_dp_audio_private *audio; + struct msm_dp_audio *msm_dp_audio; if (!pdev || !panel || !catalog) { DRM_ERROR("invalid input\n"); @@ -559,23 +559,23 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, audio->pdev = pdev; audio->catalog = catalog; - dp_audio = &audio->dp_audio; + msm_dp_audio = &audio->msm_dp_audio; - dp_catalog_audio_init(catalog); + msm_dp_catalog_audio_init(catalog); - return dp_audio; + return msm_dp_audio; error: return ERR_PTR(rc); } -void dp_audio_put(struct dp_audio *dp_audio) +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio) { - struct dp_audio_private *audio; + struct msm_dp_audio_private *audio; - if (!dp_audio) + if (!msm_dp_audio) return; - audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + audio = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); devm_kfree(&audio->pdev->dev, audio); } diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 4ab78880af82..1c9efaaa40e5 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -13,58 +13,58 @@ #include <sound/hdmi-codec.h> /** - * struct dp_audio + * struct msm_dp_audio * @lane_count: number of lanes configured in current session * @bw_code: link rate's bandwidth code for current session */ -struct dp_audio { +struct msm_dp_audio { u32 lane_count; u32 bw_code; }; /** - * dp_audio_get() + * msm_dp_audio_get() * * Creates an instance of dp audio. * * @pdev: caller's platform device instance. - * @panel: an instance of dp_panel module. - * @catalog: an instance of dp_catalog module. + * @panel: an instance of msm_dp_panel module. + * @catalog: an instance of msm_dp_catalog module. * * Returns the error code in case of failure, otherwise - * an instance of newly created dp_module. + * an instance of newly created msm_dp_module. */ -struct dp_audio *dp_audio_get(struct platform_device *pdev, - struct dp_panel *panel, - struct dp_catalog *catalog); +struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, + struct msm_dp_panel *panel, + struct msm_dp_catalog *catalog); /** - * dp_register_audio_driver() + * msm_dp_register_audio_driver() * * Registers DP device with hdmi_codec interface. * * @dev: DP device instance. - * @dp_audio: an instance of dp_audio module. + * @msm_dp_audio: an instance of msm_dp_audio module. * * * Returns the error code in case of failure, otherwise * zero on success. */ -int dp_register_audio_driver(struct device *dev, - struct dp_audio *dp_audio); +int msm_dp_register_audio_driver(struct device *dev, + struct msm_dp_audio *msm_dp_audio); -void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); +void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio); /** - * dp_audio_put() + * msm_dp_audio_put() * - * Cleans the dp_audio instance. + * Cleans the msm_dp_audio instance. * - * @dp_audio: an instance of dp_audio.
*/ -void dp_audio_put(struct dp_audio *dp_audio); +void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); -int dp_audio_hw_params(struct device *dev, +int msm_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 00dfafbebe0e..bc8d46abfc61 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -20,9 +20,9 @@ enum msm_dp_aux_err { DP_AUX_ERR_PHY, }; -struct dp_aux_private { +struct msm_dp_aux_private { struct device *dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -42,12 +42,12 @@ struct dp_aux_private { u32 offset; u32 segment; - struct drm_dp_aux dp_aux; + struct drm_dp_aux msm_dp_aux; }; #define MAX_AUX_RETRIES 5 -static ssize_t dp_aux_write(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u8 data[4]; @@ -88,11 +88,11 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, /* index = 0, write */ if (i == 0) reg |= DP_AUX_DATA_INDEX_WRITE; - dp_catalog_aux_write_data(aux->catalog, reg); + msm_dp_catalog_aux_write_data(aux->catalog, reg); } - dp_catalog_aux_clear_trans(aux->catalog, false); - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_trans(aux->catalog, false); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); reg = 0; /* Transaction number == 1 */ if (!aux->native) { /* i2c */ @@ -106,12 +106,12 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, } reg |= DP_AUX_TRANS_CTRL_GO; - dp_catalog_aux_write_trans(aux->catalog, reg); + msm_dp_catalog_aux_write_trans(aux->catalog, reg); return len; } -static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { ssize_t ret; @@ -119,7 +119,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, reinit_completion(&aux->comp); - ret = dp_aux_write(aux, msg); + ret = msm_dp_aux_write(aux, msg); if (ret < 0) return ret; @@ -131,7 +131,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, return ret; } -static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, +static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *msg) { u32 data; @@ -139,20 +139,20 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, u32 i, actual_i; u32 len = msg->size; - dp_catalog_aux_clear_trans(aux->catalog, true); + msm_dp_catalog_aux_clear_trans(aux->catalog, true); data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ data |= DP_AUX_DATA_READ; /* read */ - dp_catalog_aux_write_data(aux->catalog, data); + msm_dp_catalog_aux_write_data(aux->catalog, data); dp = msg->buffer; /* discard first byte */ - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); for (i = 0; i < len; i++) { - data = dp_catalog_aux_read_data(aux->catalog); + data = msm_dp_catalog_aux_read_data(aux->catalog); *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; @@ -163,7 +163,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, return i; } -static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, +static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg) { u32 edid_address = 0x50; @@ -185,7 +185,7 @@ static 
void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, } /** - * dp_aux_transfer_helper() - helper function for EDID read transactions + * msm_dp_aux_transfer_helper() - helper function for EDID read transactions * * @aux: DP AUX private structure * @input_msg: input message from DRM upstream APIs @@ -196,7 +196,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, * This helper function is used to fix EDID reads for non-compliant * sinks that do not handle the i2c middle-of-transaction flag correctly. */ -static void dp_aux_transfer_helper(struct dp_aux_private *aux, +static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux, struct drm_dp_aux_msg *input_msg, bool send_seg) { @@ -238,7 +238,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = segment_address; helper_msg.buffer = &aux->segment; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); } /* @@ -252,7 +252,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, helper_msg.address = input_msg->address; helper_msg.buffer = &aux->offset; helper_msg.size = 1; - dp_aux_cmd_fifo_tx(aux, &helper_msg); + msm_dp_aux_cmd_fifo_tx(aux, &helper_msg); end: aux->offset += message_size; @@ -265,15 +265,15 @@ end: * It will call aux_reset() function to reset the AUX channel, * if the waiting is timeout. */ -static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, +static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux, struct drm_dp_aux_msg *msg) { ssize_t ret; int const aux_cmd_native_max = 16; int const aux_cmd_i2c_max = 128; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); @@ -292,7 +292,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, return -EINVAL; } - ret = pm_runtime_resume_and_get(dp_aux->dev); + ret = pm_runtime_resume_and_get(msm_dp_aux->dev); if (ret) return ret; @@ -313,8 +313,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, goto exit; } - dp_aux_update_offset_and_segment(aux, msg); - dp_aux_transfer_helper(aux, msg, true); + msm_dp_aux_update_offset_and_segment(aux, msg); + msm_dp_aux_transfer_helper(aux, msg, true); aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); aux->cmd_busy = true; @@ -327,7 +327,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, aux->no_send_stop = true; } - ret = dp_aux_cmd_fifo_tx(aux, msg); + ret = msm_dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { if (aux->native) { aux->retry_cnt++; @@ -335,14 +335,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, phy_calibrate(aux->phy); } /* reset aux if link is in connected state */ - if (dp_catalog_link_is_connected(aux->catalog)) - dp_catalog_aux_reset(aux->catalog); + if (msm_dp_catalog_link_is_connected(aux->catalog)) + msm_dp_catalog_aux_reset(aux->catalog); } else { aux->retry_cnt = 0; switch (aux->aux_error_num) { case DP_AUX_ERR_NONE: if (aux->read) - ret = dp_aux_cmd_fifo_rx(aux, msg); + ret = msm_dp_aux_cmd_fifo_rx(aux, msg); msg->reply = aux->native ? 
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; break; case DP_AUX_ERR_DEFER: @@ -364,24 +364,24 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, exit: mutex_unlock(&aux->mutex); - pm_runtime_put_sync(dp_aux->dev); + pm_runtime_put_sync(msm_dp_aux->dev); return ret; } -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux) { u32 isr; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return IRQ_NONE; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); - isr = dp_catalog_aux_get_irq(aux->catalog); + isr = msm_dp_catalog_aux_get_irq(aux->catalog); /* no interrupts pending, return immediately */ if (!isr) @@ -403,7 +403,7 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) if (isr & DP_INTR_AUX_ERROR) { aux->aux_error_num = DP_AUX_ERR_PHY; - dp_catalog_aux_clear_hw_interrupts(aux->catalog); + msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog); } else if (isr & DP_INTR_NACK_DEFER) { aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; } else if (isr & DP_INTR_WRONG_ADDR) { @@ -429,68 +429,68 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) return IRQ_HANDLED; } -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled) +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); aux->enable_xfers = enabled; } -void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); phy_calibrate(aux->phy); - dp_catalog_aux_reset(aux->catalog); + msm_dp_catalog_aux_reset(aux->catalog); } -void dp_aux_init(struct drm_dp_aux *dp_aux) +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); - dp_catalog_aux_enable(aux->catalog, true); + msm_dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; aux->initted = true; mutex_unlock(&aux->mutex); } -void dp_aux_deinit(struct drm_dp_aux *dp_aux) +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_lock(&aux->mutex); aux->initted = false; - dp_catalog_aux_enable(aux->catalog, false); + msm_dp_catalog_aux_enable(aux->catalog, false); mutex_unlock(&aux->mutex); } -int dp_aux_register(struct drm_dp_aux *dp_aux) +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux) { int ret; - if (!dp_aux) { + if (!msm_dp_aux) { DRM_ERROR("invalid input\n"); return -EINVAL; } - ret = drm_dp_aux_register(dp_aux); + ret = drm_dp_aux_register(msm_dp_aux); if (ret) { DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, ret); @@ -500,34 +500,34 @@ int dp_aux_register(struct drm_dp_aux *dp_aux) 
return 0; } -void dp_aux_unregister(struct drm_dp_aux *dp_aux) +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux) { - drm_dp_aux_unregister(dp_aux); + drm_dp_aux_unregister(msm_dp_aux); } -static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, +static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux, unsigned long wait_us) { int ret; - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); ret = pm_runtime_resume_and_get(aux->dev); if (ret) return ret; - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); + ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); return ret; } -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; if (!catalog) { DRM_ERROR("invalid input\n"); @@ -553,23 +553,23 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, * before registering AUX with the DRM device so that * msm eDP panel can be detected by generic_dep_panel_probe(). */ - aux->dp_aux.name = "dpu_dp_aux"; - aux->dp_aux.dev = dev; - aux->dp_aux.transfer = dp_aux_transfer; - aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted; - drm_dp_aux_init(&aux->dp_aux); + aux->msm_dp_aux.name = "dpu_dp_aux"; + aux->msm_dp_aux.dev = dev; + aux->msm_dp_aux.transfer = msm_dp_aux_transfer; + aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted; + drm_dp_aux_init(&aux->msm_dp_aux); - return &aux->dp_aux; + return &aux->msm_dp_aux; } -void dp_aux_put(struct drm_dp_aux *dp_aux) +void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux) { - struct dp_aux_private *aux; + struct msm_dp_aux_private *aux; - if (!dp_aux) + if (!msm_dp_aux) return; - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux); mutex_destroy(&aux->mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h index 4f65e892a807..39c5b4c8596a 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.h +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -9,18 +9,18 @@ #include "dp_catalog.h" #include <drm/display/drm_dp_helper.h> -int dp_aux_register(struct drm_dp_aux *dp_aux); -void dp_aux_unregister(struct drm_dp_aux *dp_aux); -irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux); -void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled); -void dp_aux_init(struct drm_dp_aux *dp_aux); -void dp_aux_deinit(struct drm_dp_aux *dp_aux); -void dp_aux_reconfig(struct drm_dp_aux *dp_aux); +int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux); +irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled); +void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux); +void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux); struct phy; -struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, +struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog, struct phy *phy, bool is_edp); -void dp_aux_put(struct drm_dp_aux *aux); +void msm_dp_aux_put(struct drm_dp_aux *aux); #endif /*__DP_AUX_H_*/ 
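The msm_dp_aux conversion above leans on a single idiom throughout: the public handle (struct drm_dp_aux) is embedded inside a driver-private struct, and every callback upcasts back to the private state with container_of(). Below is a minimal user-space sketch of that pattern, not code from the patch; the struct fields are reduced placeholders rather than the driver's real layout, and container_of() is restated from offsetof() so the snippet builds outside the kernel.

#include <assert.h>
#include <stddef.h>

struct msm_dp_aux {				/* public handle (reduced for illustration) */
	const char *name;
};

struct msm_dp_aux_private {			/* driver-private wrapper */
	int retry_cnt;
	struct msm_dp_aux msm_dp_aux;		/* embedded public struct */
};

/* the kernel's container_of(), restated so the sketch is self-contained */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* what msm_dp_aux_transfer()/msm_dp_aux_isr() do on entry */
static struct msm_dp_aux_private *to_priv(struct msm_dp_aux *handle)
{
	return container_of(handle, struct msm_dp_aux_private, msm_dp_aux);
}

int main(void)
{
	struct msm_dp_aux_private priv = { .retry_cnt = 3 };
	struct msm_dp_aux *handle = &priv.msm_dp_aux;	/* callers see only this */

	assert(to_priv(handle) == &priv);	/* callbacks recover the wrapper */
	return 0;
}

This is also why msm_dp_aux_get() returns &aux->msm_dp_aux rather than the allocation itself: the embedded member is the only part of the private state that leaves the file.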
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 6e55cbf69674..b4c8856fb25d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -75,18 +75,18 @@ struct dss_io_data { struct dss_io_region p0; }; -struct dp_catalog_private { +struct msm_dp_catalog_private { struct device *dev; struct drm_device *drm_dev; struct dss_io_data io; u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; - struct dp_catalog dp_catalog; + struct msm_dp_catalog msm_dp_catalog; }; -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state) +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *dss = &catalog->io; msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); @@ -95,12 +95,12 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); } -static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.aux.base + offset); } -static inline void dp_write_aux(struct dp_catalog_private *catalog, +static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -110,12 +110,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog, writel(data, catalog->io.aux.base + offset); } -static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.ahb.base + offset); } -static inline void dp_write_ahb(struct dp_catalog_private *catalog, +static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -125,7 +125,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog, writel(data, catalog->io.ahb.base + offset); } -static inline void dp_write_p0(struct dp_catalog_private *catalog, +static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -135,7 +135,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog, writel(data, catalog->io.p0.base + offset); } -static inline u32 dp_read_p0(struct dp_catalog_private *catalog, +static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog, u32 offset) { /* @@ -145,12 +145,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog, return readl_relaxed(catalog->io.p0.base + offset); } -static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset) { return readl_relaxed(catalog->io.link.base + offset); } -static inline void dp_write_link(struct dp_catalog_private *catalog, +static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog, u32 offset, u32 data) { /* @@ -161,64 +161,64 @@ static inline void dp_write_link(struct dp_catalog_private *catalog, } /* aux related catalog functions */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +u32 
msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_aux(catalog, REG_DP_AUX_DATA); + return msm_dp_read_aux(catalog, REG_DP_AUX_DATA); } -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_DATA, data); + msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data); return 0; } -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data) +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); return 0; } -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read) { u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (read) { - data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); data &= ~DP_AUX_TRANS_CTRL_GO; - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); } else { - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); } return 0; } -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); - dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); return 0; } /** - * dp_catalog_aux_reset() - reset AUX controller + * msm_dp_catalog_aux_reset() - reset AUX controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -227,47 +227,47 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) * NOTE: reset AUX controller will also clear any pending HPD related interrupts * */ -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +void 
msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); aux_ctrl |= DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); usleep_range(1000, 1100); /* h/w recommended delay */ aux_ctrl &= ~DP_AUX_CTRL_RESET; - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 aux_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL); if (enable) { - dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); - dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); aux_ctrl |= DP_AUX_CTRL_ENABLE; } else { aux_ctrl &= ~DP_AUX_CTRL_ENABLE; } - dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us) { u32 state; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* poll for hpd connected status every 2ms and timeout after wait_us */ return readl_poll_timeout(catalog->io.aux.base + @@ -294,10 +294,10 @@ static void dump_regs(void __iomem *base, int len) } } -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); struct dss_io_data *io = &catalog->io; pr_info("AHB regs\n"); @@ -313,17 +313,17 @@ void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) dump_regs(io->p0.base, io->p0.len); } -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS); intr &= ~DP_INTERRUPT_STATUS1_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS1) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 
intr_ack | DP_INTERRUPT_STATUS1_MASK); return intr; @@ -331,40 +331,40 @@ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) } /* controller related catalog functions */ -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); - dp_write_link(catalog, REG_DP_TU, dp_tu); - dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu); + msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); } -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_link(catalog, REG_DP_STATE_CTRL, state); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state); } -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg) +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg); - dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); + msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); } -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ u32 ln_mapping; @@ -373,71 +373,71 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; - dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, ln_mapping); } -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); if (enable) val |= DP_MAINLINK_CTRL_ENABLE; else val &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); } -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable) { u32 mainlink_ctrl; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable); if (enable) { /* * To make sure link reg writes happens before other operation, - * dp_write_link() function uses writel() + * msm_dp_write_link() function uses writel() */ - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | DP_MAINLINK_FB_BOUNDARY_SEL); - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } else { - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } } -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 colorimetry_cfg, u32 test_bits_depth) { u32 misc_val; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); /* clear bpp bits */ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); @@ -447,27 +447,27 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, misc_val |= DP_MISC0_SYNCHRONOUS_CLK; drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); } -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog) +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog) { u32 mainlink_ctrl, hw_revision; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision >= DP_HW_VERSION_1_2) mainlink_ctrl |= 
DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE; else mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); } -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420) { @@ -478,8 +478,8 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 const link_rate_hbr3 = 810000; unsigned long den, num; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (rate == link_rate_hbr3) pixel_div = 6; @@ -522,22 +522,22 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, nvid *= 3; drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid); - dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); - dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); - dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); } -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 state_bit) { int bit, ret; u32 data; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); bit = BIT(state_bit - 1); drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit); - dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit); bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; @@ -554,25 +554,25 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, } /** - * dp_catalog_hw_revision() - retrieve DP hw revision + * msm_dp_catalog_hw_revision() - retrieve DP hw revision * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * Return: DP controller hw revision * */ -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog) { - const struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_ahb(catalog, REG_DP_HW_VERSION); + return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION); } /** - * dp_catalog_ctrl_reset() - reset DP controller + * msm_dp_catalog_ctrl_reset() - reset DP controller * - * @dp_catalog: DP catalog structure + * @msm_dp_catalog: DP catalog structure * * return: void * @@ -581,28 +581,28 @@ u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) * NOTE: reset DP controller will also clear any pending HPD related interrupts * */ -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog) { u32 sw_reset; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + 
struct msm_dp_catalog_private, msm_dp_catalog); - sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET); sw_reset |= DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); usleep_range(1000, 1100); /* h/w recommended delay */ sw_reset &= ~DP_SW_RESET; - dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); } -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog) { u32 data; int ret; - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); /* Poll for mainlink ready status */ ret = readl_poll_timeout(catalog->io.link.base + @@ -617,96 +617,96 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) return true; } -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); if (enable) { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, DP_INTERRUPT_STATUS1_MASK); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, DP_INTERRUPT_STATUS2_MASK); } else { - dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); } } -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); config = (en ? 
config | intr_mask : config & ~intr_mask); drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n", intr_mask, config); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, config & DP_DP_HPD_INT_MASK); } -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); /* Configure REFTIMER and enable it */ reftimer |= DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); /* Enable HPD */ - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE; - dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); - dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); } -static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog) +static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog) { /* trigger sdp */ - dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); } -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 config; /* enable PSR1 function */ - config = dp_read_link(catalog, REG_PSR_CONFIG); + config = msm_dp_read_link(catalog, REG_PSR_CONFIG); config |= PSR1_SUPPORTED; - dp_write_link(catalog, REG_PSR_CONFIG, config); + msm_dp_write_link(catalog, REG_PSR_CONFIG, config); - dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); - dp_catalog_enable_sdp(catalog); + msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); + msm_dp_catalog_enable_sdp(catalog); } -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 cmd; - cmd = dp_read_link(catalog, 
REG_PSR_CMD); + cmd = msm_dp_read_link(catalog, REG_PSR_CMD); cmd &= ~(PSR_ENTER | PSR_EXIT); @@ -715,17 +715,17 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) else cmd |= PSR_EXIT; - dp_catalog_enable_sdp(catalog); - dp_write_link(catalog, REG_PSR_CMD, cmd); + msm_dp_catalog_enable_sdp(catalog); + msm_dp_write_link(catalog, REG_PSR_CMD, cmd); } -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 status; - status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status); status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; @@ -733,16 +733,16 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) return status; } -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); int isr, mask; - isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); - dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, (isr & DP_DP_HPD_INT_MASK)); - mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); /* * We only want to return interrupts that are unmasked to the caller. 
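The return-value filtering in the hunk below is easy to misread, so here is a condensed sketch of msm_dp_catalog_hpd_get_intr_status() after the rename, assembled from the patch's own helpers and register names (the wrapper name hpd_pending_unmasked is illustrative only and is not part of the patch):

	static u32 hpd_pending_unmasked(struct msm_dp_catalog_private *catalog)
	{
		u32 isr, mask;

		isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
		/* acknowledge only the bits inside the HPD mask window */
		msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
				 isr & DP_DP_HPD_INT_MASK);

		mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);

		/* hide masked-off HPD bits; pass every other status bit through */
		return isr & (mask | ~DP_DP_HPD_INT_MASK);
	}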
@@ -754,115 +754,115 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) return isr & (mask | ~DP_DP_HPD_INT_MASK); } -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4); intr_ack = (intr & DP_INTERRUPT_STATUS4) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); return intr; } -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 intr, intr_ack; - intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2); intr &= ~DP_INTERRUPT_STATUS2_MASK; intr_ack = (intr & DP_INTERRUPT_STATUS2) << DP_INTERRUPT_STATUS_ACK_SHIFT; - dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, intr_ack | DP_INTERRUPT_STATUS2_MASK); return intr; } -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_ahb(catalog, REG_DP_PHY_CTRL, + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); usleep_range(1000, 1100); /* h/w recommended delay */ - dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); + msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); } -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 value = 0x0; /* Make sure to clear the current pattern before starting a new one */ - dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern); switch (pattern) { case DP_PHY_TEST_PATTERN_D10_2: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN1); break; case DP_PHY_TEST_PATTERN_ERROR_COUNT: value &= ~(1 << 16); - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); break; case DP_PHY_TEST_PATTERN_PRBS7: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_PRBS7); break; case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); /* 00111110000011111000001111100000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); /* 00001111100000111110000011111000 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); /* 1111100000111110 */ - dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); break; case DP_PHY_TEST_PATTERN_CP2520: - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); value = DP_HBR2_ERM_PATTERN; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); value |= SCRAMBLER_RESET_COUNT_VALUE; - dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); - value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL); value |= DP_MAINLINK_CTRL_ENABLE; - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); break; case DP_PHY_TEST_PATTERN_SEL_MASK: - dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, DP_MAINLINK_CTRL_ENABLE); - dp_write_link(catalog, REG_DP_STATE_CTRL, + msm_dp_write_link(catalog, REG_DP_STATE_CTRL, DP_STATE_CTRL_LINK_TRAINING_PATTERN4); break; default: @@ -872,94 +872,94 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, } } -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - return dp_read_link(catalog, REG_DP_MAINLINK_READY); + return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY); } /* panel related catalog functions */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active) +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active) { - struct dp_catalog_private *catalog = 
container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 reg; - dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); - dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); - dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); - dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active); + msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); + msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); + msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); + msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active); - reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); + reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); - if (dp_catalog->wide_bus_en) + if (msm_dp_catalog->wide_bus_en) reg |= DP_INTF_CONFIG_DATABUS_WIDEN; else reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN; - DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg); + DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg); - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); + msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); return 0; } -static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 header[2]; u32 val; int i; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); + msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header); - dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); - dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]); for (i = 0; i < sizeof(vsc_sdp->db); i += 4) { val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) | (vsc_sdp->db[i + 3] << 24)); - dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); + msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val); } } -static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog) +static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 hw_revision; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - hw_revision = dp_catalog_hw_revision(dp_catalog); + hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog); if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) { - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); - dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00); } } -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp) +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct 
dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg |= GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 |= GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); - dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp); + msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp); /* indicates presence of VSC (BIT(6) of MISC1) */ misc |= DP_MISC1_VSC_SDP; @@ -967,27 +967,27 @@ void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sd drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 cfg, cfg2, misc; - catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog); - cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); - cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); - misc = dp_read_link(catalog, REG_DP_MISC1_MISC0); + cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); + cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); + misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0); cfg &= ~GEN0_SDP_EN; - dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg); cfg2 &= ~GENERIC0_SDPSIZE_VALID; - dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2); /* switch back to MSA */ misc &= ~DP_MISC1_VSC_SDP; @@ -995,16 +995,16 @@ void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n"); pr_debug("misc settings = 0x%x\n", misc); - dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); + msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc); - dp_catalog_panel_update_sdp(dp_catalog); + msm_dp_catalog_panel_update_sdp(msm_dp_catalog); } -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); u32 hsync_period, vsync_period; u32 display_v_start, display_v_end; u32 hsync_start_x, hsync_end_x; @@ -1036,49 +1036,49 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, display_hctl = (hsync_end_x << 16) | hsync_start_x; - dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); - dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + 
msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * hsync_period); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); - dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); - dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); - - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, DP_TPG_CHECKERED_RECT_PATTERN); - dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, DP_TPG_VIDEO_CONFIG_BPP_8BIT | DP_TPG_VIDEO_CONFIG_RGB); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, DP_BIST_ENABLE_DPBIST_EN); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, DP_TIMING_ENGINE_EN_EN); drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__); } -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); - dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); - dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); } -static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) +static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len) { struct resource *res; void __iomem *base; @@ -1090,21 +1090,21 @@ static void __iomem 
*dp_ioremap(struct platform_device *pdev, int idx, size_t *l return base; } -static int dp_catalog_get_io(struct dp_catalog_private *catalog) +static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog) { struct platform_device *pdev = to_platform_device(catalog->dev); struct dss_io_data *dss = &catalog->io; - dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len); if (IS_ERR(dss->ahb.base)) return PTR_ERR(dss->ahb.base); - dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len); if (IS_ERR(dss->aux.base)) { /* * The initial binding had a single reg, but in order to * support variation in the sub-region sizes this was split. - * dp_ioremap() will fail with -EINVAL here if only a single + * msm_dp_ioremap() will fail with -EINVAL here if only a single * reg is specified, so fill in the sub-region offsets and * lengths based on this single region. */ @@ -1126,13 +1126,13 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return PTR_ERR(dss->aux.base); } } else { - dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len); if (IS_ERR(dss->link.base)) { DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); return PTR_ERR(dss->link.base); } - dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len); if (IS_ERR(dss->p0.base)) { DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); return PTR_ERR(dss->p0.base); @@ -1142,9 +1142,9 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog) return 0; } -struct dp_catalog *dp_catalog_get(struct device *dev) +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; int ret; catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); @@ -1153,78 +1153,78 @@ struct dp_catalog *dp_catalog_get(struct device *dev) catalog->dev = dev; - ret = dp_catalog_get_io(catalog); + ret = msm_dp_catalog_get_io(catalog); if (ret) return ERR_PTR(ret); - return &catalog->dp_catalog; + return &catalog->msm_dp_catalog; } -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header) +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - return dp_read_link(catalog, sdp_map[sdp][header]); + return msm_dp_read_link(catalog, sdp_map[sdp][header]); } -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + 
catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); sdp_map = catalog->audio_map; - dp_write_link(catalog, sdp_map[sdp][header], data); + msm_dp_write_link(catalog, sdp_map[sdp][header], data); } -void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select) +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 acr_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n", select, acr_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); } -void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 audio_ctrl; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG); if (enable) audio_ctrl |= BIT(0); @@ -1233,24 +1233,24 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl); - dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); /* make sure audio engine is disabled */ wmb(); } -void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 sdp_cfg = 0; u32 sdp_cfg2 = 0; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG); /* AUDIO_TIMESTAMP_SDP_EN */ sdp_cfg |= BIT(1); /* AUDIO_STREAM_SDP_EN */ @@ -1264,9 +1264,9 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg); - dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); - sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2); /* IFRM_REGSRC -> Do not use reg values */ sdp_cfg2 &= ~BIT(0); /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ @@ -1274,12 +1274,12 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2); - dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); + msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); } -void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; 
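	/*
	 * The sdp_map[][] table below holds link-register offsets indexed by
	 * (msm_dp_catalog_audio_sdp_type, msm_dp_catalog_audio_header_type);
	 * msm_dp_catalog_audio_get_header() and msm_dp_catalog_audio_set_header()
	 * simply read or write the register found at sdp_map[sdp][header].
	 */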
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { { @@ -1309,27 +1309,27 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog) }, }; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); catalog->audio_map = sdp_map; } -void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level) +void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level) { - struct dp_catalog_private *catalog; + struct msm_dp_catalog_private *catalog; u32 mainlink_levels; - if (!dp_catalog) + if (!msm_dp_catalog) return; - catalog = container_of(dp_catalog, - struct dp_catalog_private, dp_catalog); + catalog = container_of(msm_dp_catalog, + struct msm_dp_catalog_private, msm_dp_catalog); - mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); mainlink_levels &= 0xFE0; mainlink_levels |= safe_to_exit_level; @@ -1337,5 +1337,5 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", mainlink_levels, safe_to_exit_level); - dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); + msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 4679d50b8c73..e932b17eecbf 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -31,7 +31,7 @@ #define DP_HW_VERSION_1_0 0x10000000 #define DP_HW_VERSION_1_2 0x10020000 -enum dp_catalog_audio_sdp_type { +enum msm_dp_catalog_audio_sdp_type { DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_INFOFRAME, @@ -40,89 +40,89 @@ enum dp_catalog_audio_sdp_type { DP_AUDIO_SDP_MAX, }; -enum dp_catalog_audio_header_type { +enum msm_dp_catalog_audio_header_type { DP_AUDIO_SDP_HEADER_1, DP_AUDIO_SDP_HEADER_2, DP_AUDIO_SDP_HEADER_3, DP_AUDIO_SDP_HEADER_MAX, }; -struct dp_catalog { +struct msm_dp_catalog { bool wide_bus_en; }; /* Debug module */ -void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state); +void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state); /* AUX APIs */ -u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data); -int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); -int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); -void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); -void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, +u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data); +int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read); +int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_aux_enable(struct msm_dp_catalog 
*msm_dp_catalog, bool enable); +int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog, unsigned long wait_us); -u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog); /* DP Controller APIs */ -void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); -void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); -void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); -void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, +void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state); +void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config); +void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb); +void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate, u32 stream_rate_khz, bool is_ycbcr_420); -int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern); -u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); -bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); -void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, +int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); +u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog); +bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable); +void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog, u32 intr_mask, bool en); -void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter); -u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); -u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); -int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); -u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog); -void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, - u32 dp_tu, u32 valid_boundary, +void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog 
*msm_dp_catalog); +void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter); +u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog); +int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog); +u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog, + u32 msm_dp_tu, u32 valid_boundary, u32 valid_boundary2); -void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, +void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog, u32 pattern); -u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); +u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog); /* DP Panel APIs */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, - u32 sync_start, u32 width_blanking, u32 dp_active); -void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp); -void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog); -void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); -void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, +int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 msm_dp_active); +void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp); +void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog); +void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog, struct drm_display_mode *drm_mode); -void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); +void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog); -struct dp_catalog *dp_catalog_get(struct device *dev); +struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev); /* DP Audio APIs */ -u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header); -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, - enum dp_catalog_audio_sdp_type sdp, - enum dp_catalog_audio_header_type header, +u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header); +void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog, + enum msm_dp_catalog_audio_sdp_type sdp, + enum msm_dp_catalog_audio_header_type header, u32 data); -void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select); -void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable); -void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); -void dp_catalog_audio_init(struct dp_catalog *catalog); -void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level); +void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select); +void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable); +void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog); +void msm_dp_catalog_audio_sfe_level(struct 
msm_dp_catalog *catalog, u32 safe_to_exit_level); #endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index f342fc5ae41e..bc2ca8133b79 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -40,7 +40,7 @@ enum { DP_TRAINING_2, }; -struct dp_tu_calc_input { +struct msm_dp_tu_calc_input { u64 lclk; /* 162, 270, 540 and 810 */ u64 pclk_khz; /* in KHz */ u64 hactive; /* active h-width */ @@ -55,7 +55,7 @@ struct dp_tu_calc_input { int num_of_dsc_slices; /* number of slices per line */ }; -struct dp_vc_tu_mapping_table { +struct msm_dp_vc_tu_mapping_table { u32 vic; u8 lanes; u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ @@ -69,14 +69,14 @@ struct dp_vc_tu_mapping_table { u8 tu_size_minus1; }; -struct dp_ctrl_private { - struct dp_ctrl dp_ctrl; +struct msm_dp_ctrl_private { + struct msm_dp_ctrl msm_dp_ctrl; struct drm_device *drm_dev; struct device *dev; struct drm_dp_aux *aux; - struct dp_panel *panel; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_panel *panel; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; struct phy *phy; @@ -99,8 +99,8 @@ struct dp_ctrl_private { bool stream_clks_on; }; -static int dp_aux_link_configure(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_configure(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 values[2]; int err; @@ -118,14 +118,14 @@ static int dp_aux_link_configure(struct drm_dp_aux *aux, return 0; } -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); reinit_completion(&ctrl->idle_comp); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); if (!wait_for_completion_timeout(&ctrl->idle_comp, IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) @@ -134,7 +134,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "mainlink off\n"); } -static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl) { u32 config = 0, tbd; const u8 *dpcd = ctrl->panel->dpcd; @@ -142,15 +142,15 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) /* Default-> LSCLK DIV: 1/4 LCLK */ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */ /* Scrambler reset enable */ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) config |= DP_CONFIGURATION_CTRL_ASSR; - tbd = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); + tbd = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; @@ -170,24 +170,24 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) if (ctrl->panel->psr_cap.version) config |= DP_CONFIGURATION_CTRL_SEND_VSC; - dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); + msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); } -static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl) { u32 cc, 
tb; - dp_catalog_ctrl_lane_mapping(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); - dp_catalog_setup_peripheral_flush(ctrl->catalog); + msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_setup_peripheral_flush(ctrl->catalog); - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); - tb = dp_link_get_test_bits_depth(ctrl->link, - ctrl->panel->dp_mode.bpp); - cc = dp_link_get_colorimetry_config(ctrl->link); - dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); - dp_panel_timing_cfg(ctrl->panel); + tb = msm_dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->msm_dp_mode.bpp); + cc = msm_dp_link_get_colorimetry_config(ctrl->link); + msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + msm_dp_panel_timing_cfg(ctrl->panel); } /* @@ -310,7 +310,7 @@ static int _tu_param_compare(s64 a, s64 b) } } -static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, +static void msm_dp_panel_update_tu_timings(struct msm_dp_tu_calc_input *in, struct tu_algo_data *tu) { int nlanes = in->nlanes; @@ -622,9 +622,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu) } } -static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, - struct dp_tu_calc_input *in, - struct dp_vc_tu_mapping_table *tu_table) +static void _dp_ctrl_calc_tu(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_tu_calc_input *in, + struct msm_dp_vc_tu_mapping_table *tu_table) { struct tu_algo_data *tu; int compare_result_1, compare_result_2; @@ -645,7 +645,7 @@ static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, if (!tu) return; - dp_panel_update_tu_timings(in, tu); + msm_dp_panel_update_tu_timings(in, tu); tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ @@ -956,21 +956,21 @@ tu_size_calc: kfree(tu); } -static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, - struct dp_vc_tu_mapping_table *tu_table) +static void msm_dp_ctrl_calc_tu_parameters(struct msm_dp_ctrl_private *ctrl, + struct msm_dp_vc_tu_mapping_table *tu_table) { - struct dp_tu_calc_input in; + struct msm_dp_tu_calc_input in; struct drm_display_mode *drm_mode; - drm_mode = &ctrl->panel->dp_mode.drm_mode; + drm_mode = &ctrl->panel->msm_dp_mode.drm_mode; in.lclk = ctrl->link->link_params.rate / 1000; in.pclk_khz = drm_mode->clock; in.hactive = drm_mode->hdisplay; in.hporch = drm_mode->htotal - drm_mode->hdisplay; in.nlanes = ctrl->link->link_params.num_lanes; - in.bpp = ctrl->panel->dp_mode.bpp; - in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444; + in.bpp = ctrl->panel->msm_dp_mode.bpp; + in.pixel_enc = ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420 ? 
420 : 444; in.dsc_en = 0; in.async_en = 0; in.fec_en = 0; @@ -980,16 +980,16 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, _dp_ctrl_calc_tu(ctrl, &in, tu_table); } -static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl) { - u32 dp_tu = 0x0; + u32 msm_dp_tu = 0x0; u32 valid_boundary = 0x0; u32 valid_boundary2 = 0x0; - struct dp_vc_tu_mapping_table tu_calc_table; + struct msm_dp_vc_tu_mapping_table tu_calc_table; - dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); + msm_dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); - dp_tu |= tu_calc_table.tu_size_minus1; + msm_dp_tu |= tu_calc_table.tu_size_minus1; valid_boundary |= tu_calc_table.valid_boundary_link; valid_boundary |= (tu_calc_table.delay_start_link << 16); @@ -1001,13 +1001,13 @@ static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) valid_boundary2 |= BIT(0); pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", - dp_tu, valid_boundary, valid_boundary2); + msm_dp_tu, valid_boundary, valid_boundary2); - dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, - dp_tu, valid_boundary, valid_boundary2); + msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, + msm_dp_tu, valid_boundary, valid_boundary2); } -static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1019,7 +1019,7 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl, u8 v_level, u8 p_level) { union phy_configure_opts *phy_opts = &ctrl->phy_opts; @@ -1034,9 +1034,9 @@ static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl, return 0; } -static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl) { - struct dp_link *link = ctrl->link; + struct msm_dp_link *link = ctrl->link; int ret = 0, lane, lane_cnt; u8 buf[4]; u32 max_level_reached = 0; @@ -1046,7 +1046,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "voltage level: %d emphasis level: %d\n", voltage_swing_level, pre_emphasis_level); - ret = dp_ctrl_set_vx_px(ctrl, + ret = msm_dp_ctrl_set_vx_px(ctrl, voltage_swing_level, pre_emphasis_level); if (ret) @@ -1083,7 +1083,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) return ret; } -static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, +static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, u8 pattern) { u8 buf; @@ -1100,7 +1100,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, return ret == 1; } -static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, u8 *link_status) { int ret = 0, len; @@ -1114,24 +1114,24 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, return ret; } -static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries, old_v_level, ret = 0; u8 link_status[DP_LINK_STATUS_SIZE]; int const maximum_retries = 4; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_1; - ret = 
dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); - ret = dp_ctrl_update_vx_px(ctrl); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1140,7 +1140,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, for (tries = 0; tries < maximum_retries; tries++) { drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1160,8 +1160,8 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, old_v_level = ctrl->link->phy_params.v_level; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; } @@ -1170,7 +1170,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_rate_down_shift(struct msm_dp_ctrl_private *ctrl) { int ret = 0; @@ -1198,7 +1198,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl) { if (ctrl->link->link_params.num_lanes == 1) @@ -1213,13 +1213,13 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) return 0; } -static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl) { - dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); } -static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, int *training_step) { int tries = 0, ret = 0; @@ -1228,7 +1228,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, int const maximum_retries = 5; u8 link_status[DP_LINK_STATUS_SIZE]; - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); *training_step = DP_TRAINING_2; @@ -1243,16 +1243,16 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, state_ctrl_bit = 2; } - ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); + ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, pattern); + msm_dp_ctrl_train_pattern_set(ctrl, pattern); for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); - ret = dp_ctrl_read_link_status(ctrl, link_status); + ret = msm_dp_ctrl_read_link_status(ctrl, link_status); if (ret) return ret; @@ -1261,8 +1261,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return 0; } - dp_link_adjust_levels(ctrl->link, link_status); - ret = dp_ctrl_update_vx_px(ctrl); + msm_dp_link_adjust_levels(ctrl->link, link_status); + ret = msm_dp_ctrl_update_vx_px(ctrl); if (ret) return ret; @@ -1271,24 +1271,24 @@ static int 
dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; const u8 *dpcd = ctrl->panel->dpcd; u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; u8 assr; - struct dp_link_info link_info = {0}; + struct msm_dp_link_info link_info = {0}; - dp_ctrl_config_ctrl(ctrl); + msm_dp_ctrl_config_ctrl(ctrl); link_info.num_lanes = ctrl->link->link_params.num_lanes; link_info.rate = ctrl->link->link_params.rate; link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; - dp_link_reset_phy_params_vx_px(ctrl->link); + msm_dp_link_reset_phy_params_vx_px(ctrl->link); - dp_aux_link_configure(ctrl->aux, &link_info); + msm_dp_aux_link_configure(ctrl->aux, &link_info); if (drm_dp_max_downspread(dpcd)) encoding[0] |= DP_SPREAD_AMP_0_5; @@ -1302,7 +1302,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, &assr, 1); } - ret = dp_ctrl_link_train_1(ctrl, training_step); + ret = msm_dp_ctrl_link_train_1(ctrl, training_step); if (ret) { DRM_ERROR("link training #1 failed. ret=%d\n", ret); goto end; @@ -1311,7 +1311,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, /* print success info as this is a result of user initiated action */ drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n"); - ret = dp_ctrl_link_train_2(ctrl, training_step); + ret = msm_dp_ctrl_link_train_2(ctrl, training_step); if (ret) { DRM_ERROR("link training #2 failed. ret=%d\n", ret); goto end; @@ -1321,17 +1321,17 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n"); end: - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); return ret; } -static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, +static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl, int *training_step) { int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) return ret; @@ -1342,17 +1342,17 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, * a link training pattern, we have to first do soft reset. */ - ret = dp_ctrl_link_train(ctrl, training_step); + ret = msm_dp_ctrl_link_train(ctrl, training_step); return ret; } -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n"); @@ -1374,11 +1374,11 @@ int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks); @@ -1391,12 +1391,12 @@ void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? 
"on" : "off"); } -static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret = 0; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->link_clks_on) { drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n"); @@ -1406,7 +1406,7 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) if (!ctrl->core_clks_on) { drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n"); - dp_ctrl_core_clk_enable(dp_ctrl); + msm_dp_ctrl_core_clk_enable(msm_dp_ctrl); } ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks); @@ -1424,11 +1424,11 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl) return 0; } -static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) +static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks); @@ -1441,7 +1441,7 @@ static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl) ctrl->core_clks_on ? "on" : "off"); } -static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl) { int ret = 0; struct phy *phy = ctrl->phy; @@ -1455,7 +1455,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) phy_power_on(phy); dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000); - ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_link_clk_enable(&ctrl->msm_dp_ctrl); if (ret) DRM_ERROR("Unable to start link clocks. 
ret=%d\n", ret); @@ -1464,13 +1464,13 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) return ret; } -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); /* * all dp controller programmable registers will not @@ -1478,28 +1478,28 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) * therefore interrupt mask bits have to be updated * to enable/disable interrupts */ - dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); + msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); } -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl) { u8 cfg; - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; - dp_catalog_ctrl_config_psr(ctrl->catalog); + msm_dp_catalog_ctrl_config_psr(ctrl->catalog); cfg = DP_PSR_ENABLE; drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1); } -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter) { - struct dp_ctrl_private *ctrl = container_of(dp_ctrl, - struct dp_ctrl_private, dp_ctrl); + struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl, + struct msm_dp_ctrl_private, msm_dp_ctrl); if (!ctrl->panel->psr_cap.version) return; @@ -1516,64 +1516,64 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) */ if (enter) { reinit_completion(&ctrl->psr_op_comp); - dp_catalog_ctrl_set_psr(ctrl->catalog, true); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true); if (!wait_for_completion_timeout(&ctrl->psr_op_comp, PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) { DRM_ERROR("PSR_ENTRY timedout\n"); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); return; } - dp_ctrl_push_idle(dp_ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_ctrl_push_idle(msm_dp_ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); } else { - dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); + msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); - dp_catalog_ctrl_set_psr(ctrl->catalog, false); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - dp_ctrl_wait4video_ready(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_ctrl_wait4video_ready(ctrl); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); } } -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + 
msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_init(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_phy_reset(ctrl->catalog); + msm_dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_exit(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy = ctrl->phy; int ret = 0; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes; phy_configure(phy, &ctrl->phy_opts); /* @@ -1583,13 +1583,13 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) */ dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); /* hw recommended delay before re-enabling clocks */ msleep(20); - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); return ret; @@ -1598,18 +1598,18 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) return ret; } -static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl) { struct phy *phy; phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -1622,30 +1622,30 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) return 0; } -static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl) { int ret = 0; int training_step = DP_TRAINING_NONE; - dp_ctrl_push_idle(&ctrl->dp_ctrl); + msm_dp_ctrl_push_idle(&ctrl->msm_dp_ctrl); ctrl->link->phy_params.p_level = 0; ctrl->link->phy_params.v_level = 0; - ret = dp_ctrl_setup_main_link(ctrl, &training_step); + ret = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (ret) goto end; - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); end: return ret; } -static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl) { bool success = false; u32 pattern_sent = 0x0; @@ -1653,17 +1653,17 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", 
pattern_requested); - if (dp_ctrl_set_vx_px(ctrl, + if (msm_dp_ctrl_set_vx_px(ctrl, ctrl->link->phy_params.v_level, ctrl->link->phy_params.p_level)) { DRM_ERROR("Failed to set v/p levels\n"); return false; } - dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); - dp_ctrl_update_vx_px(ctrl); - dp_link_send_test_response(ctrl->link); + msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + msm_dp_ctrl_update_vx_px(ctrl); + msm_dp_link_send_test_response(ctrl->link); - pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); switch (pattern_sent) { case MR_LINK_TRAINING1: @@ -1697,7 +1697,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) return success; } -static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_process_phy_test_request(struct msm_dp_ctrl_private *ctrl) { int ret; unsigned long pixel_rate; @@ -1713,15 +1713,15 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) * running. Add the global reset just before disabling the * link clocks and core clocks. */ - dp_ctrl_off(&ctrl->dp_ctrl); + msm_dp_ctrl_off(&ctrl->msm_dp_ctrl); - ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_on_link(&ctrl->msm_dp_ctrl); if (ret) { DRM_ERROR("failed to enable DP link controller\n"); return ret; } - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000); if (ret) { DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret); @@ -1739,49 +1739,49 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) ctrl->stream_clks_on = true; } - dp_ctrl_send_phy_test_pattern(ctrl); + msm_dp_ctrl_send_phy_test_pattern(ctrl); return 0; } -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 sink_request = 0x0; - if (!dp_ctrl) { + if (!msm_dp_ctrl) { DRM_ERROR("invalid input\n"); return; } - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); sink_request = ctrl->link->sink_request; if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n"); - if (dp_ctrl_process_phy_test_request(ctrl)) { + if (msm_dp_ctrl_process_phy_test_request(ctrl)) { DRM_ERROR("process phy_test_req failed\n"); return; } } if (sink_request & DP_LINK_STATUS_UPDATED) { - if (dp_ctrl_link_maintenance(ctrl)) { + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } if (sink_request & DP_TEST_LINK_TRAINING) { - dp_link_send_test_response(ctrl->link); - if (dp_ctrl_link_maintenance(ctrl)) { + msm_dp_link_send_test_response(ctrl->link); + if (msm_dp_ctrl_link_maintenance(ctrl)) { DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); return; } } } -static bool dp_ctrl_clock_recovery_any_ok( +static bool msm_dp_ctrl_clock_recovery_any_ok( const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count) { @@ -1800,20 +1800,20 @@ static bool dp_ctrl_clock_recovery_any_ok( return drm_dp_clock_recovery_ok(link_status, reduced_cnt); } -static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl) +static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) { u8 link_status[DP_LINK_STATUS_SIZE]; 
int num_lanes = ctrl->link->link_params.num_lanes; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) { int rc = 0; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 rate; int link_train_max_retries = 5; u32 const phy_cts_pixel_clk_khz = 148500; @@ -1821,15 +1821,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) unsigned int training_step; unsigned long pixel_rate; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); rate = ctrl->panel->link_info.rate; - pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock; - dp_ctrl_core_clk_enable(&ctrl->dp_ctrl); + msm_dp_ctrl_core_clk_enable(&ctrl->msm_dp_ctrl); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, @@ -1840,7 +1840,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate = rate; ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; - if (ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; } @@ -1848,32 +1848,32 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes, pixel_rate); - rc = dp_ctrl_enable_mainlink_clocks(ctrl); + rc = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (rc) return rc; while (--link_train_max_retries) { training_step = DP_TRAINING_NONE; - rc = dp_ctrl_setup_main_link(ctrl, &training_step); + rc = msm_dp_ctrl_setup_main_link(ctrl, &training_step); if (rc == 0) { /* training completed successfully */ break; } else if (training_step == DP_TRAINING_1) { /* link train_1 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ - if (dp_ctrl_clock_recovery_any_ok(link_status, + if (msm_dp_ctrl_clock_recovery_any_ok(link_status, ctrl->link->link_params.num_lanes)) { /* * some lanes are ready, * reduce lane number */ - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* lane == 1 already */ /* end with failure */ break; @@ -1885,16 +1885,16 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } } else if (training_step == DP_TRAINING_2) { /* link train_2 failed */ - if (!dp_catalog_link_is_connected(ctrl->catalog)) + if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - dp_ctrl_read_link_status(ctrl, link_status); + msm_dp_ctrl_read_link_status(ctrl, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) - rc = dp_ctrl_link_rate_down_shift(ctrl); + rc = msm_dp_ctrl_link_rate_down_shift(ctrl); else - rc = dp_ctrl_link_lane_down_shift(ctrl); + rc = msm_dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* end with failure */ @@ -1902,10 +1902,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } /* stop link training before start re training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); } - rc = 
dp_ctrl_reinitialize_mainlink(ctrl); + rc = msm_dp_ctrl_reinitialize_mainlink(ctrl); if (rc) { DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc); break; @@ -1926,38 +1926,38 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) * link training failed * end txing train pattern here */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); - dp_ctrl_deinitialize_mainlink(ctrl); + msm_dp_ctrl_deinitialize_mainlink(ctrl); rc = -ECONNRESET; } return rc; } -static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) +static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl) { int training_step = DP_TRAINING_NONE; - return dp_ctrl_setup_main_link(ctrl, &training_step); + return msm_dp_ctrl_setup_main_link(ctrl, &training_step); } -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; unsigned long pixel_rate; unsigned long pixel_rate_orig; - if (!dp_ctrl) + if (!msm_dp_ctrl) return -EINVAL; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); - pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = pixel_rate_orig = ctrl->panel->msm_dp_mode.drm_mode.clock; - if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420) + if (msm_dp_ctrl->wide_bus_en || ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420) pixel_rate >>= 1; drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", @@ -1969,7 +1969,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on); if (!ctrl->link_clks_on) { /* link clk is off */ - ret = dp_ctrl_enable_mainlink_clocks(ctrl); + ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl); if (ret) { DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); goto end; @@ -1993,11 +1993,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl->stream_clks_on = true; } - if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) - dp_ctrl_link_retrain(ctrl); + if (force_link_train || !msm_dp_ctrl_channel_eq_ok(ctrl)) + msm_dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ - dp_ctrl_clear_training_pattern(ctrl); + msm_dp_ctrl_clear_training_pattern(ctrl); /* * Set up transfer unit values and set controller state to send @@ -2005,22 +2005,22 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) */ reinit_completion(&ctrl->video_comp); - dp_ctrl_configure_source_params(ctrl); + msm_dp_ctrl_configure_source_params(ctrl); - dp_catalog_ctrl_config_msa(ctrl->catalog, + msm_dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, pixel_rate_orig, - ctrl->panel->dp_mode.out_fmt_is_yuv_420); + ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420); - dp_ctrl_setup_tr_unit(ctrl); + msm_dp_ctrl_setup_tr_unit(ctrl); - dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); - ret = dp_ctrl_wait4video_ready(ctrl); + ret = msm_dp_ctrl_wait4video_ready(ctrl); if (ret) return ret; - mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog); drm_dbg_dp(ctrl->drm_dev, "mainlink %s\n", mainlink_ready ? 
"READY" : "NOT READY"); @@ -2028,20 +2028,20 @@ end: return ret; } -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); /* set dongle to D3 (power off) mode */ - dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); + msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2049,7 +2049,7 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); @@ -2061,17 +2061,17 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n", phy, phy->init_count, phy->power_count); @@ -2082,19 +2082,19 @@ void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -void dp_ctrl_off(struct dp_ctrl *dp_ctrl) +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct phy *phy; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); phy = ctrl->phy; - dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); + msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog); - dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); - dp_catalog_ctrl_reset(ctrl->catalog); + msm_dp_catalog_ctrl_reset(ctrl->catalog); if (ctrl->stream_clks_on) { clk_disable_unprepare(ctrl->pixel_clk); @@ -2102,26 +2102,26 @@ void dp_ctrl_off(struct dp_ctrl *dp_ctrl) } dev_pm_opp_set_rate(ctrl->dev, 0); - dp_ctrl_link_clk_disable(&ctrl->dp_ctrl); + msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl); phy_power_off(phy); drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", phy, phy->init_count, phy->power_count); } -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; u32 isr; irqreturn_t ret = IRQ_NONE; - if (!dp_ctrl) + if (!msm_dp_ctrl) return IRQ_NONE; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); if (ctrl->panel->psr_cap.version) { - isr = 
dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); + isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); if (isr) complete(&ctrl->psr_op_comp); @@ -2136,7 +2136,7 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl) drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n"); } - isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog); if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { @@ -2164,13 +2164,13 @@ static const char *ctrl_clks[] = { "ctrl_link_iface", }; -static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) +static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; struct device *dev; int i, rc; - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl); dev = ctrl->dev; ctrl->num_core_clks = ARRAY_SIZE(core_clks); @@ -2204,12 +2204,12 @@ static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl) return 0; } -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy) { - struct dp_ctrl_private *ctrl; + struct msm_dp_ctrl_private *ctrl; int ret; if (!dev || !panel || !aux || @@ -2228,7 +2228,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, if (ret) { dev_err(dev, "invalid DP OPP table in device tree\n"); /* caller do PTR_ERR(opp_table) */ - return (struct dp_ctrl *)ERR_PTR(ret); + return (struct msm_dp_ctrl *)ERR_PTR(ret); } /* OPP table is optional */ @@ -2248,11 +2248,11 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, ctrl->dev = dev; ctrl->phy = phy; - ret = dp_ctrl_clk_init(&ctrl->dp_ctrl); + ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl); if (ret) { dev_err(dev, "failed to init clocks\n"); return ERR_PTR(ret); } - return &ctrl->dp_ctrl; + return &ctrl->msm_dp_ctrl; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index ffcbd9a25748..b7abfedbf574 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -11,34 +11,34 @@ #include "dp_link.h" #include "dp_catalog.h" -struct dp_ctrl { +struct msm_dp_ctrl { bool wide_bus_en; }; struct phy; -int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); -void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl); -void dp_ctrl_off(struct dp_ctrl *dp_ctrl); -void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); -irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl); -void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl); -struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, - struct dp_panel *panel, struct drm_dp_aux *aux, - struct dp_catalog *catalog, +int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl); +int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train); +void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl); +irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl); +void 
msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl); +struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link, + struct msm_dp_panel *panel, struct drm_dp_aux *aux, + struct msm_dp_catalog *catalog, struct phy *phy); -void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl); -void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl); -void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl); -void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable); -void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl); +void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enable); +void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl); -int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl); -void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl); +int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl); +void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl); #endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index b8611f6d2296..22fd946ee201 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -17,15 +17,15 @@ #define DEBUG_NAME "msm_dp" -struct dp_debug_private { - struct dp_link *link; - struct dp_panel *panel; +struct msm_dp_debug_private { + struct msm_dp_link *link; + struct msm_dp_panel *panel; struct drm_connector *connector; }; -static int dp_debug_show(struct seq_file *seq, void *p) +static int msm_dp_debug_show(struct seq_file *seq, void *p) { - struct dp_debug_private *debug = seq->private; + struct msm_dp_debug_private *debug = seq->private; u64 lclk = 0; u32 link_params_rate; const struct drm_display_mode *drm_mode; @@ -33,7 +33,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) if (!debug) return -ENODEV; - drm_mode = &debug->panel->dp_mode.drm_mode; + drm_mode = &debug->panel->msm_dp_mode.drm_mode; seq_printf(seq, "\tname = %s\n", DEBUG_NAME); seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n", @@ -55,8 +55,8 @@ static int dp_debug_show(struct seq_file *seq, void *p) drm_mode->hsync_end - drm_mode->hsync_start, drm_mode->vsync_end - drm_mode->vsync_start); seq_printf(seq, "\t\tactive_low = %dx%d\n", - debug->panel->dp_mode.h_active_low, - debug->panel->dp_mode.v_active_low); + debug->panel->msm_dp_mode.h_active_low, + debug->panel->msm_dp_mode.v_active_low); seq_printf(seq, "\t\th_skew = %d\n", drm_mode->hskew); seq_printf(seq, "\t\trefresh rate = %d\n", @@ -64,7 +64,7 @@ static int dp_debug_show(struct seq_file *seq, void *p) seq_printf(seq, "\t\tpixel clock khz = %d\n", drm_mode->clock); seq_printf(seq, "\t\tbpp = %d\n", - debug->panel->dp_mode.bpp); + debug->panel->msm_dp_mode.bpp); /* Link Information */ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n", @@ -83,11 +83,11 @@ static int dp_debug_show(struct seq_file *seq, void *p) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_debug); +DEFINE_SHOW_ATTRIBUTE(msm_dp_debug); -static int dp_test_data_show(struct seq_file *m, void *data) +static int msm_dp_test_data_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; u32 
bpc; @@ -98,18 +98,18 @@ static int dp_test_data_show(struct seq_file *m, void *data) seq_printf(m, "vdisplay: %d\n", debug->link->test_video.test_v_height); seq_printf(m, "bpc: %u\n", - dp_link_bit_depth_to_bpp(bpc) / 3); + msm_dp_link_bit_depth_to_bpp(bpc) / 3); } else { seq_puts(m, "0"); } return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_data); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_data); -static int dp_test_type_show(struct seq_file *m, void *data) +static int msm_dp_test_type_show(struct seq_file *m, void *data) { - const struct dp_debug_private *debug = m->private; + const struct msm_dp_debug_private *debug = m->private; const struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) @@ -119,15 +119,15 @@ static int dp_test_type_show(struct seq_file *m, void *data) return 0; } -DEFINE_SHOW_ATTRIBUTE(dp_test_type); +DEFINE_SHOW_ATTRIBUTE(msm_dp_test_type); -static ssize_t dp_test_active_write(struct file *file, +static ssize_t msm_dp_test_active_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { char *input_buffer; int status = 0; - const struct dp_debug_private *debug; + const struct msm_dp_debug_private *debug; const struct drm_connector *connector; int val = 0; @@ -164,9 +164,9 @@ static ssize_t dp_test_active_write(struct file *file, return len; } -static int dp_test_active_show(struct seq_file *m, void *data) +static int msm_dp_test_active_show(struct seq_file *m, void *data) { - struct dp_debug_private *debug = m->private; + struct msm_dp_debug_private *debug = m->private; struct drm_connector *connector = debug->connector; if (connector->status == connector_status_connected) { @@ -181,28 +181,28 @@ static int dp_test_active_show(struct seq_file *m, void *data) return 0; } -static int dp_test_active_open(struct inode *inode, +static int msm_dp_test_active_open(struct inode *inode, struct file *file) { - return single_open(file, dp_test_active_show, + return single_open(file, msm_dp_test_active_show, inode->i_private); } static const struct file_operations test_active_fops = { .owner = THIS_MODULE, - .open = dp_test_active_open, + .open = msm_dp_test_active_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, - .write = dp_test_active_write + .write = msm_dp_test_active_write }; -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) { - struct dp_debug_private *debug; + struct msm_dp_debug_private *debug; if (!dev || !panel || !link) { DRM_ERROR("invalid input\n"); @@ -217,20 +217,20 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, debug->panel = panel; debugfs_create_file("dp_debug", 0444, root, - debug, &dp_debug_fops); + debug, &msm_dp_debug_fops); if (!is_edp) { - debugfs_create_file("msm_dp_test_active", 0444, + debugfs_create_file("dp_test_active", 0444, root, debug, &test_active_fops); - debugfs_create_file("msm_dp_test_data", 0444, + debugfs_create_file("dp_test_data", 0444, root, - debug, &dp_test_data_fops); + debug, &msm_dp_test_data_fops); - debugfs_create_file("msm_dp_test_type", 0444, + debugfs_create_file("dp_test_type", 0444, root, - debug, &dp_test_type_fops); + debug, &msm_dp_test_type_fops); } return 0; diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 7e1aa892fc09..6dc0ff4f0f65 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ 
b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -12,7 +12,7 @@ #if defined(CONFIG_DEBUG_FS) /** - * dp_debug_get() - configure and get the DisplayPlot debug module data + * msm_dp_debug_init() - set up the DisplayPort debug module * * @dev: device instance of the caller * @panel: instance of panel module @@ -25,8 +25,8 @@ * This function sets up the debug module and provides a way * for debugfs input to be communicated with existing modules */ -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp); @@ -34,8 +34,8 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel, #else static inline -int dp_debug_init(struct device *dev, struct dp_panel *panel, - struct dp_link *link, +int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel, + struct msm_dp_link *link, struct drm_connector *connector, struct dentry *root, bool is_edp) diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index e1228fb093ee..aba925aab7ad 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -67,13 +67,13 @@ enum { #define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) -struct dp_event { +struct msm_dp_event { u32 event_id; u32 data; u32 delay; }; -struct dp_display_private { +struct msm_dp_display_private { int irq; unsigned int id; @@ -85,14 +85,14 @@ struct dp_display_private { struct drm_device *drm_dev; - struct dp_catalog *catalog; + struct msm_dp_catalog *catalog; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_panel *panel; - struct dp_ctrl *ctrl; + struct msm_dp_link *link; + struct msm_dp_panel *panel; + struct msm_dp_ctrl *ctrl; - struct dp_display_mode dp_mode; - struct msm_dp dp_display; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp msm_dp_display; /* wait for audio signaling */ struct completion audio_comp; @@ -104,12 +104,12 @@ struct dp_display_private { u32 event_pndx; u32 event_gndx; struct task_struct *ev_tsk; - struct dp_event event_list[DP_EVENT_Q_MAX]; + struct msm_dp_event event_list[DP_EVENT_Q_MAX]; spinlock_t event_lock; bool wide_bus_supported; - struct dp_audio *audio; + struct msm_dp_audio *audio; }; struct msm_dp_desc { @@ -118,25 +118,33 @@ struct msm_dp_desc { bool wide_bus_supported; }; -static const struct msm_dp_desc sc7180_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sa8775p[] = { + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x22154000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x2215c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, + {} +}; + +static const struct msm_dp_desc msm_dp_desc_sc7180[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc7280_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc7280[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8180x_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8180x[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 
0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8280xp_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sc8280xp[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -148,12 +156,12 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = { {} }; -static const struct msm_dp_desc sm8650_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_sm8650[] = { { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc x1e80100_dp_descs[] = { +static const struct msm_dp_desc msm_dp_desc_x1e80100[] = { { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, @@ -161,70 +169,71 @@ static const struct msm_dp_desc x1e80100_dp_descs[] = { {} }; -static const struct of_device_id dp_dt_match[] = { - { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs }, - { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs }, - { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs }, - { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs }, +static const struct of_device_id msm_dp_dt_match[] = { + { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p }, + { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc7280-edp", .data = &msm_dp_desc_sc7280 }, + { .compatible = "qcom,sc8180x-dp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x }, + { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp }, + { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 }, + { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 }, + { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 }, {} }; -static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); - return container_of(dp, struct dp_display_private, dp_display); + return container_of(dp, struct msm_dp_display_private, msm_dp_display); } -static int dp_add_event(struct dp_display_private *dp_priv, u32 event, +static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event, u32 data, u32 delay) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int pndx; - 
spin_lock_irqsave(&dp_priv->event_lock, flag); - pndx = dp_priv->event_pndx + 1; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + pndx = msm_dp_priv->event_pndx + 1; pndx %= DP_EVENT_Q_MAX; - if (pndx == dp_priv->event_gndx) { + if (pndx == msm_dp_priv->event_gndx) { pr_err("event_q is full: pndx=%d gndx=%d\n", - dp_priv->event_pndx, dp_priv->event_gndx); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + msm_dp_priv->event_pndx, msm_dp_priv->event_gndx); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -EPERM; } - todo = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo->event_id = event; todo->data = data; todo->delay = delay; - wake_up(&dp_priv->event_q); - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + wake_up(&msm_dp_priv->event_q); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event) { unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; u32 gndx; - spin_lock_irqsave(&dp_priv->event_lock, flag); - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return -ENOENT; } - gndx = dp_priv->event_gndx; - while (dp_priv->event_pndx != gndx) { - todo = &dp_priv->event_list[gndx]; + gndx = msm_dp_priv->event_gndx; + while (msm_dp_priv->event_pndx != gndx) { + todo = &msm_dp_priv->event_list[gndx]; if (todo->event_id == event) { todo->event_id = EV_NO_EVENT; /* deleted */ todo->delay = 0; @@ -232,60 +241,60 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event) gndx++; gndx %= DP_EVENT_Q_MAX; } - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); return 0; } -void dp_display_signal_audio_start(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); reinit_completion(&dp->audio_comp); } -void dp_display_signal_audio_complete(struct msm_dp *dp_display) +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); complete_all(&dp->audio_comp); } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv); +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv); -static int dp_display_bind(struct device *dev, struct device *master, +static int msm_dp_display_bind(struct device *dev, struct device *master, void *data) { int rc = 0; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); struct drm_device *drm = priv->dev; - dp->dp_display.drm_dev = drm; - 
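
msm_dp_add_event() and msm_dp_del_event() manage a fixed-size, spinlock-protected ring: event_pndx is the producer index, event_gndx the consumer index, both wrapping modulo DP_EVENT_Q_MAX, and the queue counts as full when advancing the producer would land on the consumer, so one slot always stays unused. That sacrificed slot is what lets full and empty be told apart without a separate element count. The same index arithmetic in a standalone sketch (hypothetical names, locking omitted):

	#define Q_MAX 8	/* stand-in for DP_EVENT_Q_MAX */

	struct ring {
		unsigned int pndx;	/* producer index */
		unsigned int gndx;	/* consumer index */
		int slots[Q_MAX];
	};

	/* Returns 0 on success, -1 when full (one slot is kept free). */
	static int ring_push(struct ring *r, int v)
	{
		unsigned int next = (r->pndx + 1) % Q_MAX;

		if (next == r->gndx)
			return -1;	/* pndx would catch up with gndx */
		r->slots[r->pndx] = v;
		r->pndx = next;
		return 0;
	}

	/* Returns 0 on success, -1 when empty (pndx == gndx). */
	static int ring_pop(struct ring *r, int *v)
	{
		if (r->pndx == r->gndx)
			return -1;
		*v = r->slots[r->gndx];
		r->gndx = (r->gndx + 1) % Q_MAX;
		return 0;
	}
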
priv->dp[dp->id] = &dp->dp_display; + dp->msm_dp_display.drm_dev = drm; + priv->dp[dp->id] = &dp->msm_dp_display; dp->drm_dev = drm; dp->aux->drm_dev = drm; - rc = dp_aux_register(dp->aux); + rc = msm_dp_aux_register(dp->aux); if (rc) { DRM_ERROR("DRM DP AUX register failed\n"); goto end; } - rc = dp_register_audio_driver(dev, dp->audio); + rc = msm_dp_register_audio_driver(dev, dp->audio); if (rc) { DRM_ERROR("Audio registration Dp failed\n"); goto end; } - rc = dp_hpd_event_thread_start(dp); + rc = msm_dp_hpd_event_thread_start(dp); if (rc) { DRM_ERROR("Event thread create failed\n"); goto end; @@ -296,44 +305,44 @@ end: return rc; } -static void dp_display_unbind(struct device *dev, struct device *master, +static void msm_dp_display_unbind(struct device *dev, struct device *master, void *data) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); kthread_stop(dp->ev_tsk); of_dp_aux_depopulate_bus(dp->aux); - dp_unregister_audio_driver(dev, dp->audio); - dp_aux_unregister(dp->aux); + msm_dp_unregister_audio_driver(dev, dp->audio); + msm_dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; priv->dp[dp->id] = NULL; } -static const struct component_ops dp_display_comp_ops = { - .bind = dp_display_bind, - .unbind = dp_display_unbind, +static const struct component_ops msm_dp_display_comp_ops = { + .bind = msm_dp_display_bind, + .unbind = msm_dp_display_unbind, }; -static void dp_display_send_hpd_event(struct msm_dp *dp_display) +static void msm_dp_display_send_hpd_event(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct drm_connector *connector; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - connector = dp->dp_display.connector; + connector = dp->msm_dp_display.connector; drm_helper_hpd_irq_event(connector->dev); } -static int dp_display_send_hpd_notification(struct dp_display_private *dp, +static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *dp, bool hpd) { - if ((hpd && dp->dp_display.link_ready) || - (!hpd && !dp->dp_display.link_ready)) { + if ((hpd && dp->msm_dp_display.link_ready) || + (!hpd && !dp->msm_dp_display.link_ready)) { drm_dbg_dp(dp->drm_dev, "HPD already %s\n", (hpd ? 
"on" : "off")); return 0; @@ -342,139 +351,139 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, /* reset video pattern flag on disconnect */ if (!hpd) { dp->panel->video_test = false; - if (!dp->dp_display.is_edp) - drm_dp_set_subconnector_property(dp->dp_display.connector, + if (!dp->msm_dp_display.is_edp) + drm_dp_set_subconnector_property(dp->msm_dp_display.connector, connector_status_disconnected, dp->panel->dpcd, dp->panel->downstream_ports); } - dp->dp_display.link_ready = hpd; + dp->msm_dp_display.link_ready = hpd; drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", - dp->dp_display.connector_type, hpd); - dp_display_send_hpd_event(&dp->dp_display); + dp->msm_dp_display.connector_type, hpd); + msm_dp_display_send_hpd_event(&dp->msm_dp_display); return 0; } -static int dp_display_process_hpd_high(struct dp_display_private *dp) +static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp) { - struct drm_connector *connector = dp->dp_display.connector; + struct drm_connector *connector = dp->msm_dp_display.connector; const struct drm_display_info *info = &connector->display_info; int rc = 0; - rc = dp_panel_read_sink_caps(dp->panel, connector); + rc = msm_dp_panel_read_sink_caps(dp->panel, connector); if (rc) goto end; - dp_link_process_request(dp->link); + msm_dp_link_process_request(dp->link); - if (!dp->dp_display.is_edp) + if (!dp->msm_dp_display.is_edp) drm_dp_set_subconnector_property(connector, connector_status_connected, dp->panel->dpcd, dp->panel->downstream_ports); - dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; + dp->msm_dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; dp->audio_supported = info->has_audio; - dp_panel_handle_sink_request(dp->panel); + msm_dp_panel_handle_sink_request(dp->panel); /* * set sink to normal operation mode -- D0 * before dpcd read */ - dp_link_psm_config(dp->link, &dp->panel->link_info, false); + msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false); - dp_link_reset_phy_params_vx_px(dp->link); - rc = dp_ctrl_on_link(dp->ctrl); + msm_dp_link_reset_phy_params_vx_px(dp->link); + rc = msm_dp_ctrl_on_link(dp->ctrl); if (rc) { DRM_ERROR("failed to complete DP link training\n"); goto end; } - dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); end: return rc; } -static void dp_display_host_phy_init(struct dp_display_private *dp) +static void msm_dp_display_host_phy_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (!dp->phy_initialized) { - dp_ctrl_phy_init(dp->ctrl); + msm_dp_ctrl_phy_init(dp->ctrl); dp->phy_initialized = true; } } -static void dp_display_host_phy_exit(struct dp_display_private *dp) +static void msm_dp_display_host_phy_exit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); if (dp->phy_initialized) { - dp_ctrl_phy_exit(dp->ctrl); + msm_dp_ctrl_phy_exit(dp->ctrl); dp->phy_initialized = false; } } -static void dp_display_host_init(struct dp_display_private *dp) +static void msm_dp_display_host_init(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - 
dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_core_clk_enable(dp->ctrl); - dp_ctrl_reset_irq_ctrl(dp->ctrl, true); - dp_aux_init(dp->aux); + msm_dp_ctrl_core_clk_enable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true); + msm_dp_aux_init(dp->aux); dp->core_initialized = true; } -static void dp_display_host_deinit(struct dp_display_private *dp) +static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp) { drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", - dp->dp_display.connector_type, dp->core_initialized, + dp->msm_dp_display.connector_type, dp->core_initialized, dp->phy_initialized); - dp_ctrl_reset_irq_ctrl(dp->ctrl, false); - dp_aux_deinit(dp->aux); - dp_ctrl_core_clk_disable(dp->ctrl); + msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false); + msm_dp_aux_deinit(dp->aux); + msm_dp_ctrl_core_clk_disable(dp->ctrl); dp->core_initialized = false; } -static int dp_display_usbpd_configure_cb(struct device *dev) +static int msm_dp_display_usbpd_configure_cb(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_display_host_phy_init(dp); + msm_dp_display_host_phy_init(dp); - return dp_display_process_hpd_high(dp); + return msm_dp_display_process_hpd_high(dp); } -static int dp_display_notify_disconnect(struct device *dev) +static int msm_dp_display_notify_disconnect(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); return 0; } -static void dp_display_handle_video_request(struct dp_display_private *dp) +static void msm_dp_display_handle_video_request(struct msm_dp_display_private *dp) { if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { dp->panel->video_test = true; - dp_link_send_test_response(dp->link); + msm_dp_link_send_test_response(dp->link); } } -static int dp_display_handle_port_status_changed(struct dp_display_private *dp) +static int msm_dp_display_handle_port_status_changed(struct msm_dp_display_private *dp) { int rc = 0; @@ -482,12 +491,12 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n"); if (dp->hpd_state != ST_DISCONNECTED) { dp->hpd_state = ST_DISCONNECT_PENDING; - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); } } else { if (dp->hpd_state == ST_DISCONNECTED) { dp->hpd_state = ST_MAINLINK_READY; - rc = dp_display_process_hpd_high(dp); + rc = msm_dp_display_process_hpd_high(dp); if (rc) dp->hpd_state = ST_DISCONNECTED; } @@ -496,7 +505,7 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp) return rc; } -static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +static int msm_dp_display_handle_irq_hpd(struct msm_dp_display_private *dp) { u32 sink_request = dp->link->sink_request; @@ -510,48 +519,48 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp) } } - dp_ctrl_handle_sink_request(dp->ctrl); + msm_dp_ctrl_handle_sink_request(dp->ctrl); if (sink_request & DP_TEST_LINK_VIDEO_PATTERN) - dp_display_handle_video_request(dp); + msm_dp_display_handle_video_request(dp); return 0; } -static int 
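
msm_dp_display_process_hpd_high() above fixes the bring-up order for a newly detected sink: sink capabilities are read over AUX first, the sink is moved to the D0 power state before further DPCD traffic, drive parameters are reset, and only then is link training attempted, with user space notified only once training succeeds. The same order, condensed from the function above into an outline that keeps the driver's calls but drops the subconnector, PSR and audio bookkeeping (a reading aid, not compilable on its own):

	static int hpd_high_outline(struct msm_dp_display_private *dp)
	{
		struct drm_connector *connector = dp->msm_dp_display.connector;
		int rc;

		rc = msm_dp_panel_read_sink_caps(dp->panel, connector);	/* DPCD/EDID over AUX */
		if (rc)
			return rc;

		msm_dp_link_process_request(dp->link);		/* pending sink requests */
		msm_dp_panel_handle_sink_request(dp->panel);	/* e.g. test patterns */

		/* sink to normal operation (D0) before any further DPCD access */
		msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false);
		msm_dp_link_reset_phy_params_vx_px(dp->link);	/* reset swing/pre-emphasis */

		rc = msm_dp_ctrl_on_link(dp->ctrl);		/* link training */
		if (rc)
			return rc;

		/* notify user space only after the link is up */
		return msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
	}
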
dp_display_usbpd_attention_cb(struct device *dev) +static int msm_dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; u32 sink_request; - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* check for any test request issued by sink */ - rc = dp_link_process_request(dp->link); + rc = msm_dp_link_process_request(dp->link); if (!rc) { sink_request = dp->link->sink_request; drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request); if (sink_request & DS_PORT_STATUS_CHANGED) - rc = dp_display_handle_port_status_changed(dp); + rc = msm_dp_display_handle_port_status_changed(dp); else - rc = dp_display_handle_irq_hpd(dp); + rc = msm_dp_display_handle_irq_hpd(dp); } return rc; } -static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; int ret; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, true); + msm_dp_aux_enable_xfers(dp->aux, true); mutex_lock(&dp->event_mutex); state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -565,7 +574,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) if (state == ST_DISCONNECT_PENDING) { /* wait until ST_DISCONNECTED */ - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } @@ -577,7 +586,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) return ret; } - ret = dp_display_usbpd_configure_cb(&pdev->dev); + ret = msm_dp_display_usbpd_configure_cb(&pdev->dev); if (ret) { /* link train failed */ dp->hpd_state = ST_DISCONNECTED; pm_runtime_put_sync(&pdev->dev); @@ -586,60 +595,60 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) } drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); /* uevent will complete connection part */ return 0; }; -static void dp_display_handle_plugged_change(struct msm_dp *dp_display, +static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display, bool plugged) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, - struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, + struct msm_dp_display_private, msm_dp_display); /* notify audio subsystem only if sink supports audio */ - if (dp_display->plugged_cb && dp_display->codec_dev && + if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev && dp->audio_supported) - dp_display->plugged_cb(dp_display->codec_dev, plugged); + msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged); } -static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; - dp_aux_enable_xfers(dp->aux, false); + msm_dp_aux_enable_xfers(dp->aux, false); mutex_lock(&dp->event_mutex); state = dp->hpd_state; 
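
The plug, unplug and irq handlers in this region drive a small hot-plug state machine held in dp->hpd_state and serialized by event_mutex. The states and their main transitions, reconstructed from the handlers in this file as a reading aid (the driver declares the real enum elsewhere in dp_display.c):

	/* Reconstruction of the hpd_state lifecycle; not the driver's declaration. */
	enum hpd_state_sketch {
		SKETCH_ST_DISCONNECTED,		/* idle; a plug irq trains the link */
		SKETCH_ST_MAINLINK_READY,	/* link trained, waiting for bridge enable */
		SKETCH_ST_CONNECTED,		/* bridge enabled, video running */
		SKETCH_ST_DISCONNECT_PENDING,	/* sink gone, user notification in flight */
		SKETCH_ST_DISPLAY_OFF,		/* bridge disabled, cable may still be in */
	};

Handlers that arrive in a transitional state (for example an irq_hpd while still ST_MAINLINK_READY) re-queue themselves with a delay of 1 rather than acting immediately, which is why the delayed-event mechanism below exists.
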
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* unplugged, no more irq_hpd handle */ - dp_del_event(dp, EV_IRQ_HPD_INT); + msm_dp_del_event(dp, EV_IRQ_HPD_INT); if (state == ST_DISCONNECTED) { /* triggered by irq_hpd with sink_count = 0 */ if (dp->link->sink_count == 0) { - dp_display_host_phy_exit(dp); + msm_dp_display_host_phy_exit(dp); } - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_DISCONNECT_PENDING) { mutex_unlock(&dp->event_mutex); return 0; } else if (state == ST_MAINLINK_READY) { - dp_ctrl_off_link(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off_link(dp->ctrl); + msm_dp_display_host_phy_exit(dp); dp->hpd_state = ST_DISCONNECTED; - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); pm_runtime_put_sync(&pdev->dev); mutex_unlock(&dp->event_mutex); return 0; @@ -649,7 +658,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) * We don't need separate work for disconnect as * connect/attention interrupts are disabled */ - dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev); if (state == ST_DISPLAY_OFF) { dp->hpd_state = ST_DISCONNECTED; @@ -658,10 +667,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) } /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(&dp->dp_display, false); + msm_dp_display_handle_plugged_change(&dp->msm_dp_display, false); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); /* uevent will complete disconnection part */ pm_runtime_put_sync(&pdev->dev); @@ -669,7 +678,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) return 0; } -static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data) { u32 state; @@ -678,7 +687,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) /* irq_hpd can happen at either connected or disconnected state */ state = dp->hpd_state; drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); @@ -687,33 +696,33 @@ if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) { /* wait until ST_CONNECTED */ - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ mutex_unlock(&dp->event_mutex); return 0; } - dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev); + msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev); drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", - dp->dp_display.connector_type, state); + dp->msm_dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); return 0; } -static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +static void msm_dp_display_deinit_sub_modules(struct msm_dp_display_private *dp) { - dp_audio_put(dp->audio); - dp_panel_put(dp->panel); - dp_aux_put(dp->aux); 
+ msm_dp_audio_put(dp->audio); + msm_dp_panel_put(dp->panel); + msm_dp_aux_put(dp->aux); } -static int dp_init_sub_modules(struct dp_display_private *dp) +static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp) { int rc = 0; - struct device *dev = &dp->dp_display.pdev->dev; - struct dp_panel_in panel_in = { + struct device *dev = &dp->msm_dp_display.pdev->dev; + struct msm_dp_panel_in panel_in = { .dev = dev, }; struct phy *phy; @@ -723,14 +732,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return PTR_ERR(phy); rc = phy_set_mode_ext(phy, PHY_MODE_DP, - dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); + dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); if (rc) { DRM_ERROR("failed to set phy submode, rc = %d\n", rc); dp->catalog = NULL; goto error; } - dp->catalog = dp_catalog_get(dev); + dp->catalog = msm_dp_catalog_get(dev); if (IS_ERR(dp->catalog)) { rc = PTR_ERR(dp->catalog); DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); @@ -738,9 +747,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->aux = dp_aux_get(dev, dp->catalog, + dp->aux = msm_dp_aux_get(dev, dp->catalog, phy, - dp->dp_display.is_edp); + dp->msm_dp_display.is_edp); if (IS_ERR(dp->aux)) { rc = PTR_ERR(dp->aux); DRM_ERROR("failed to initialize aux, rc = %d\n", rc); @@ -748,7 +757,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->link = dp_link_get(dev, dp->aux); + dp->link = msm_dp_link_get(dev, dp->aux); if (IS_ERR(dp->link)) { rc = PTR_ERR(dp->link); DRM_ERROR("failed to initialize link, rc = %d\n", rc); @@ -760,7 +769,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) panel_in.catalog = dp->catalog; panel_in.link = dp->link; - dp->panel = dp_panel_get(&panel_in); + dp->panel = msm_dp_panel_get(&panel_in); if (IS_ERR(dp->panel)) { rc = PTR_ERR(dp->panel); DRM_ERROR("failed to initialize panel, rc = %d\n", rc); @@ -768,7 +777,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_link; } - dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, dp->catalog, phy); if (IS_ERR(dp->ctrl)) { @@ -778,7 +787,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error_ctrl; } - dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog); + dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog); if (IS_ERR(dp->audio)) { rc = PTR_ERR(dp->audio); pr_err("failed to initialize audio, rc = %d\n", rc); @@ -789,51 +798,51 @@ static int dp_init_sub_modules(struct dp_display_private *dp) return rc; error_ctrl: - dp_panel_put(dp->panel); + msm_dp_panel_put(dp->panel); error_link: - dp_aux_put(dp->aux); + msm_dp_aux_put(dp->aux); error: return rc; } -static int dp_display_set_mode(struct msm_dp *dp_display, - struct dp_display_mode *mode) +static int msm_dp_display_set_mode(struct msm_dp *msm_dp_display, + struct msm_dp_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); - dp->panel->dp_mode.bpp = mode->bpp; - dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; - dp_panel_init_panel_info(dp->panel); + drm_mode_copy(&dp->panel->msm_dp_mode.drm_mode, &mode->drm_mode); + 
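
msm_dp_init_sub_modules() above follows the kernel's goto-unwind convention: sub-modules are acquired in order (catalog, aux, link, panel, ctrl, audio) and a failure jumps to a label that releases only what already succeeded, in reverse order, while msm_dp_display_deinit_sub_modules() performs the full teardown. The shape of the pattern in a standalone userspace sketch, where malloc/free stand in for the driver's *_get()/*_put() pairs:

	#include <stdlib.h>

	struct res_a { int x; };	/* hypothetical resources */
	struct res_b { int y; };
	struct ctx { struct res_a *a; struct res_b *b; };

	static struct res_a *acquire_a(void) { return malloc(sizeof(struct res_a)); }
	static struct res_b *acquire_b(void) { return malloc(sizeof(struct res_b)); }

	/* Acquire in order; on failure unwind only what succeeded, in reverse. */
	static int init_sketch(struct ctx *c)
	{
		c->a = acquire_a();
		if (!c->a)
			return -1;

		c->b = acquire_b();
		if (!c->b)
			goto err_free_a;

		return 0;

	err_free_a:
		free(c->a);
		c->a = NULL;
		return -1;
	}
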
dp->panel->msm_dp_mode.bpp = mode->bpp; + dp->panel->msm_dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; + msm_dp_panel_init_panel_info(dp->panel); return 0; } -static int dp_display_enable(struct dp_display_private *dp, bool force_link_train) +static int msm_dp_display_enable(struct msm_dp_display_private *dp, bool force_link_train) { int rc = 0; - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count); - if (dp_display->power_on) { + if (msm_dp_display->power_on) { drm_dbg_dp(dp->drm_dev, "Link already setup, return\n"); return 0; } - rc = dp_ctrl_on_stream(dp->ctrl, force_link_train); + rc = msm_dp_ctrl_on_stream(dp->ctrl, force_link_train); if (!rc) - dp_display->power_on = true; + msm_dp_display->power_on = true; return rc; } -static int dp_display_post_enable(struct msm_dp *dp_display) +static int msm_dp_display_post_enable(struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; u32 rate; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); rate = dp->link->link_params.rate; @@ -843,85 +852,85 @@ static int dp_display_post_enable(struct msm_dp *dp_display) } /* signal the connect event late to synchronize video and display */ - dp_display_handle_plugged_change(dp_display, true); + msm_dp_display_handle_plugged_change(msm_dp_display, true); - if (dp_display->psr_supported) - dp_ctrl_config_psr(dp->ctrl); + if (msm_dp_display->psr_supported) + msm_dp_ctrl_config_psr(dp->ctrl); return 0; } -static int dp_display_disable(struct dp_display_private *dp) +static int msm_dp_display_disable(struct msm_dp_display_private *dp) { - struct msm_dp *dp_display = &dp->dp_display; + struct msm_dp *msm_dp_display = &dp->msm_dp_display; - if (!dp_display->power_on) + if (!msm_dp_display->power_on) return 0; /* wait only if audio was enabled */ - if (dp_display->audio_enabled) { + if (msm_dp_display->audio_enabled) { /* signal the disconnect event */ - dp_display_handle_plugged_change(dp_display, false); + msm_dp_display_handle_plugged_change(msm_dp_display, false); if (!wait_for_completion_timeout(&dp->audio_comp, HZ * 5)) DRM_ERROR("audio comp timeout\n"); } - dp_display->audio_enabled = false; + msm_dp_display->audio_enabled = false; if (dp->link->sink_count == 0) { /* * irq_hpd with sink_count = 0 * hdmi unplugged out of dongle */ - dp_ctrl_off_link_stream(dp->ctrl); + msm_dp_ctrl_off_link_stream(dp->ctrl); } else { /* * unplugged interrupt * dongle unplugged out of DUT */ - dp_ctrl_off(dp->ctrl); - dp_display_host_phy_exit(dp); + msm_dp_ctrl_off(dp->ctrl); + msm_dp_display_host_phy_exit(dp); } - dp_display->power_on = false; + msm_dp_display->power_on = false; drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count); return 0; } -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev) { bool plugged; - dp_display->plugged_cb = fn; - dp_display->codec_dev = codec_dev; - plugged = dp_display->link_ready; - dp_display_handle_plugged_change(dp_display, plugged); + msm_dp_display->plugged_cb = fn; + msm_dp_display->codec_dev = codec_dev; + plugged = msm_dp_display->link_ready; + msm_dp_display_handle_plugged_change(msm_dp_display, plugged); return 0; } /** - * dp_bridge_mode_valid - callback to determine if 
specified mode is valid + * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { const u32 num_components = 3, default_bpp = 24; - struct dp_display_private *dp_display; - struct dp_link_info *link_info; + struct msm_dp_display_private *msm_dp_display; + struct msm_dp_link_info *link_info; u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -931,18 +940,18 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; - dp_display = container_of(dp, struct dp_display_private, dp_display); - link_info = &dp_display->panel->link_info; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + link_info = &msm_dp_display->panel->link_info; if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - dp_display->panel->vsc_sdp_supported) + msm_dp_display->panel->vsc_sdp_supported) mode_pclk_khz /= 2; mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; - mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp = msm_dp_panel_get_mode_bpp(msm_dp_display->panel, mode_bpp, mode_pclk_khz); mode_rate_khz = mode_pclk_khz * mode_bpp; @@ -954,50 +963,50 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -int dp_display_get_modes(struct msm_dp *dp) +int msm_dp_display_get_modes(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_panel_get_modes(dp_display->panel, + return msm_dp_panel_get_modes(msm_dp_display->panel, dp->connector); } -bool dp_display_check_video_test(struct msm_dp *dp) +bool msm_dp_display_check_video_test(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_display->panel->video_test; + return msm_dp_display->panel->video_test; } -int dp_display_get_test_bpp(struct msm_dp *dp) +int msm_dp_display_get_test_bpp(struct msm_dp *dp) { - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; if (!dp) { DRM_ERROR("invalid params\n"); return 0; } - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - return dp_link_bit_depth_to_bpp( - dp_display->link->test_video.test_bit_depth); + return msm_dp_link_bit_depth_to_bpp( + msm_dp_display->link->test_video.test_bit_depth); } void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) { - struct 
dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); /* * if we are reading registers we need the link clocks to be on @@ -1006,65 +1015,65 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) * power_on status before dumping DP registers to avoid crash due * to unclocked access */ - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (!dp->power_on) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - dp_catalog_snapshot(dp_display->catalog, disp_state); + msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_display_set_psr(struct msm_dp *dp_display, bool enter) +void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - if (!dp_display) { + if (!msm_dp_display) { DRM_ERROR("invalid params\n"); return; } - dp = container_of(dp_display, struct dp_display_private, dp_display); - dp_ctrl_set_psr(dp->ctrl, enter); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + msm_dp_ctrl_set_psr(dp->ctrl, enter); } static int hpd_event_thread(void *data) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; unsigned long flag; - struct dp_event *todo; + struct msm_dp_event *todo; int timeout_mode = 0; - dp_priv = (struct dp_display_private *)data; + msm_dp_priv = (struct msm_dp_display_private *)data; while (1) { if (timeout_mode) { - wait_event_timeout(dp_priv->event_q, - (dp_priv->event_pndx == dp_priv->event_gndx) || + wait_event_timeout(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) || kthread_should_stop(), EVENT_TIMEOUT); } else { - wait_event_interruptible(dp_priv->event_q, - (dp_priv->event_pndx != dp_priv->event_gndx) || + wait_event_interruptible(msm_dp_priv->event_q, + (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) || kthread_should_stop()); } if (kthread_should_stop()) break; - spin_lock_irqsave(&dp_priv->event_lock, flag); - todo = &dp_priv->event_list[dp_priv->event_gndx]; + spin_lock_irqsave(&msm_dp_priv->event_lock, flag); + todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx]; if (todo->delay) { - struct dp_event *todo_next; + struct msm_dp_event *todo_next; - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; /* re enter delay event into q */ - todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; - dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++]; + msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX; todo_next->event_id = todo->event_id; todo_next->data = todo->data; todo_next->delay = todo->delay - 1; @@ -1075,33 +1084,33 @@ static int hpd_event_thread(void *data) /* switch to timeout mode */ timeout_mode = 1; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } /* timeout with no events in q */ - if (dp_priv->event_pndx == dp_priv->event_gndx) { - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) { + 
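
In hpd_event_thread above, a queue entry with a non-zero delay is not dispatched: it is re-queued with delay - 1 and the thread switches from an interruptible wait to wait_event_timeout(), so the queue is re-scanned every EVENT_TIMEOUT even without a new wake-up. A delay of N therefore fires roughly N timeout periods later, which is how the replug path further down defers EV_HPD_PLUG_INT by three ticks. The tick logic in isolation (hypothetical names):

	#include <stdbool.h>

	struct dp_event_sketch { int event_id; unsigned int delay; };

	/* One look at the queue head: dispatch now, or burn one timeout tick. */
	static bool ready_to_dispatch(struct dp_event_sketch *e)
	{
		if (e->delay) {
			e->delay--;	/* caller re-queues e and waits with a timeout */
			return false;
		}
		return true;		/* dispatch e->event_id */
	}
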
spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); continue; } - dp_priv->event_gndx++; - dp_priv->event_gndx %= DP_EVENT_Q_MAX; + msm_dp_priv->event_gndx++; + msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX; timeout_mode = 0; - spin_unlock_irqrestore(&dp_priv->event_lock, flag); + spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag); switch (todo->event_id) { case EV_HPD_PLUG_INT: - dp_hpd_plug_handle(dp_priv, todo->data); + msm_dp_hpd_plug_handle(msm_dp_priv, todo->data); break; case EV_HPD_UNPLUG_INT: - dp_hpd_unplug_handle(dp_priv, todo->data); + msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data); break; case EV_IRQ_HPD_INT: - dp_irq_hpd_handle(dp_priv, todo->data); + msm_dp_irq_hpd_handle(msm_dp_priv, todo->data); break; case EV_USER_NOTIFICATION: - dp_display_send_hpd_notification(dp_priv, + msm_dp_display_send_hpd_notification(msm_dp_priv, todo->data); break; default: @@ -1112,22 +1121,22 @@ static int hpd_event_thread(void *data) return 0; } -static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv) +static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv) { /* set event q to empty */ - dp_priv->event_gndx = 0; - dp_priv->event_pndx = 0; + msm_dp_priv->event_gndx = 0; + msm_dp_priv->event_pndx = 0; - dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); - if (IS_ERR(dp_priv->ev_tsk)) - return PTR_ERR(dp_priv->ev_tsk); + msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler"); + if (IS_ERR(msm_dp_priv->ev_tsk)) + return PTR_ERR(msm_dp_priv->ev_tsk); return 0; } -static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id) { - struct dp_display_private *dp = dev_id; + struct msm_dp_display_private *dp = dev_id; irqreturn_t ret = IRQ_NONE; u32 hpd_isr_status; @@ -1136,43 +1145,43 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog); if (hpd_isr_status & 0x0F) { drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n", - dp->dp_display.connector_type, hpd_isr_status); + dp->msm_dp_display.connector_type, hpd_isr_status); /* hpd related interrupts */ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { - dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); } if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); } if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); ret = IRQ_HANDLED; } /* DP controller isr */ - ret |= dp_ctrl_isr(dp->ctrl); + ret |= msm_dp_ctrl_isr(dp->ctrl); /* DP aux isr */ - ret |= dp_aux_isr(dp->aux); + ret |= msm_dp_aux_isr(dp->aux); return ret; } -static int dp_display_request_irq(struct dp_display_private *dp) +static int msm_dp_display_request_irq(struct msm_dp_display_private *dp) { int rc = 0; - struct platform_device *pdev = dp->dp_display.pdev; + struct platform_device *pdev = dp->msm_dp_display.pdev; dp->irq = platform_get_irq(pdev, 0); if (dp->irq < 0) { @@ -1180,7 +1189,7 @@ static int dp_display_request_irq(struct 
dp_display_private *dp) return dp->irq; } - rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler, + rc = devm_request_irq(&pdev->dev, dp->irq, msm_dp_display_irq_handler, IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN, "dp_display_isr", dp); @@ -1193,7 +1202,7 @@ static int dp_display_request_irq(struct dp_display_private *dp) return 0; } -static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev) +static const struct msm_dp_desc *msm_dp_display_get_desc(struct platform_device *pdev) { const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev); struct resource *res; @@ -1212,7 +1221,7 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde return NULL; } -static int dp_display_probe_tail(struct device *dev) +static int msm_dp_display_probe_tail(struct device *dev) { struct msm_dp *dp = dev_get_drvdata(dev); int ret; @@ -1232,19 +1241,19 @@ static int dp_display_probe_tail(struct device *dev) return ret; } - ret = component_add(dev, &dp_display_comp_ops); + ret = component_add(dev, &msm_dp_display_comp_ops); if (ret) DRM_ERROR("component add failed, rc=%d\n", ret); return ret; } -static int dp_auxbus_done_probe(struct drm_dp_aux *aux) +static int msm_dp_auxbus_done_probe(struct drm_dp_aux *aux) { - return dp_display_probe_tail(aux->dev); + return msm_dp_display_probe_tail(aux->dev); } -static int dp_display_get_connector_type(struct platform_device *pdev, +static int msm_dp_display_get_connector_type(struct platform_device *pdev, const struct msm_dp_desc *desc) { struct device_node *node = pdev->dev.of_node; @@ -1263,10 +1272,10 @@ static int dp_display_get_connector_type(struct platform_device *pdev, return connector_type; } -static int dp_display_probe(struct platform_device *pdev) +static int msm_dp_display_probe(struct platform_device *pdev) { int rc = 0; - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct msm_dp_desc *desc; if (!pdev || !pdev->dev.of_node) { @@ -1278,18 +1287,18 @@ static int dp_display_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; - desc = dp_display_get_desc(pdev); + desc = msm_dp_display_get_desc(pdev); if (!desc) return -EINVAL; - dp->dp_display.pdev = pdev; + dp->msm_dp_display.pdev = pdev; dp->id = desc->id; - dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc); + dp->msm_dp_display.connector_type = msm_dp_display_get_connector_type(pdev, desc); dp->wide_bus_supported = desc->wide_bus_supported; - dp->dp_display.is_edp = - (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); + dp->msm_dp_display.is_edp = + (dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); - rc = dp_init_sub_modules(dp); + rc = msm_dp_init_sub_modules(dp); if (rc) { DRM_ERROR("init sub module failed\n"); return -EPROBE_DEFER; @@ -1301,28 +1310,28 @@ static int dp_display_probe(struct platform_device *pdev) spin_lock_init(&dp->event_lock); /* Store DP audio handle inside DP display */ - dp->dp_display.dp_audio = dp->audio; + dp->msm_dp_display.msm_dp_audio = dp->audio; init_completion(&dp->audio_comp); - platform_set_drvdata(pdev, &dp->dp_display); + platform_set_drvdata(pdev, &dp->msm_dp_display); rc = devm_pm_runtime_enable(&pdev->dev); if (rc) goto err; - rc = dp_display_request_irq(dp); + rc = msm_dp_display_request_irq(dp); if (rc) goto err; - if (dp->dp_display.is_edp) { - rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe); + if (dp->msm_dp_display.is_edp) { + rc = devm_of_dp_aux_populate_bus(dp->aux, msm_dp_auxbus_done_probe); if 
(rc) { DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc); goto err; } } else { - rc = dp_display_probe_tail(&pdev->dev); + rc = msm_dp_display_probe_tail(&pdev->dev); if (rc) goto err; } @@ -1330,70 +1339,70 @@ return rc; err: - dp_display_deinit_sub_modules(dp); + msm_dp_display_deinit_sub_modules(dp); return rc; } -static void dp_display_remove(struct platform_device *pdev) +static void msm_dp_display_remove(struct platform_device *pdev) { - struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); - component_del(&pdev->dev, &dp_display_comp_ops); - dp_display_deinit_sub_modules(dp); + component_del(&pdev->dev, &msm_dp_display_comp_ops); + msm_dp_display_deinit_sub_modules(dp); platform_set_drvdata(pdev, NULL); } -static int dp_pm_runtime_suspend(struct device *dev) +static int msm_dp_pm_runtime_suspend(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); disable_irq(dp->irq); - if (dp->dp_display.is_edp) { - dp_display_host_phy_exit(dp); - dp_catalog_ctrl_hpd_disable(dp->catalog); + if (dp->msm_dp_display.is_edp) { + msm_dp_display_host_phy_exit(dp); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); } - dp_display_host_deinit(dp); + msm_dp_display_host_deinit(dp); return 0; } -static int dp_pm_runtime_resume(struct device *dev) +static int msm_dp_pm_runtime_resume(struct device *dev) { - struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_dp_display_private *dp = dev_get_dp_display_private(dev); /* * for eDP, host controller, HPD block and PHY are enabled here * but with HPD irq disabled * * for DP, only host controller is enabled here. 
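
Note how the interrupt was requested further up with IRQF_NO_AUTOEN: the line stays masked until msm_dp_pm_runtime_resume() calls enable_irq(), and msm_dp_pm_runtime_suspend() mirrors that with disable_irq() before the host loses its clocks, so the ISR never runs against unclocked hardware. The pairing in outline, using real kernel APIs around hypothetical my_* names:

	#include <linux/device.h>
	#include <linux/interrupt.h>

	struct my_ctx { int irq; };	/* hypothetical driver context */

	static irqreturn_t my_isr(int irq, void *data)
	{
		/* safe: only ever unmasked while the block is powered */
		return IRQ_HANDLED;
	}

	static int my_probe_irq(struct device *dev, struct my_ctx *ctx)
	{
		/* IRQF_NO_AUTOEN: request the line but leave it masked */
		return devm_request_irq(dev, ctx->irq, my_isr,
					IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
					"my_isr", ctx);
	}

	static int my_runtime_resume(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		/* power and clock the block first ... */
		enable_irq(ctx->irq);	/* ... then unmask */
		return 0;
	}

	static int my_runtime_suspend(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		disable_irq(ctx->irq);	/* mask before clocks go away */
		return 0;
	}
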
- * HPD block is enabled at dp_bridge_hpd_enable() + * HPD block is enabled at msm_dp_bridge_hpd_enable() * PHY will be enabled at plugin handler later */ - dp_display_host_init(dp); - if (dp->dp_display.is_edp) { - dp_catalog_ctrl_hpd_enable(dp->catalog); - dp_display_host_phy_init(dp); + msm_dp_display_host_init(dp); + if (dp->msm_dp_display.is_edp) { + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_display_host_phy_init(dp); } enable_irq(dp->irq); return 0; } -static const struct dev_pm_ops dp_pm_ops = { - SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL) +static const struct dev_pm_ops msm_dp_pm_ops = { + SET_RUNTIME_PM_OPS(msm_dp_pm_runtime_suspend, msm_dp_pm_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; -static struct platform_driver dp_display_driver = { - .probe = dp_display_probe, - .remove_new = dp_display_remove, +static struct platform_driver msm_dp_display_driver = { + .probe = msm_dp_display_probe, + .remove_new = msm_dp_display_remove, .driver = { .name = "msm-dp-display", - .of_match_table = dp_dt_match, + .of_match_table = msm_dp_dt_match, .suppress_bind_attrs = true, - .pm = &dp_pm_ops, + .pm = &msm_dp_pm_ops, }, }; @@ -1401,7 +1410,7 @@ int __init msm_dp_register(void) { int ret; - ret = platform_driver_register(&dp_display_driver); + ret = platform_driver_register(&msm_dp_display_driver); if (ret) DRM_ERROR("Dp display driver register failed"); @@ -1410,294 +1419,294 @@ int __init msm_dp_register(void) void __exit msm_dp_unregister(void) { - platform_driver_unregister(&dp_display_driver); + platform_driver_unregister(&msm_dp_display_driver); } -bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display, +bool msm_dp_is_yuv_420_enabled(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; const struct drm_display_info *info; - dp = container_of(dp_display, struct dp_display_private, dp_display); - info = &dp_display->connector->display_info; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + info = &msm_dp_display->connector->display_info; return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode); } -bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display, +bool msm_dp_needs_periph_flush(const struct msm_dp *msm_dp_display, const struct drm_display_mode *mode) { - return msm_dp_is_yuv_420_enabled(dp_display, mode); + return msm_dp_is_yuv_420_enabled(msm_dp_display, mode); } -bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) +bool msm_dp_wide_bus_available(const struct msm_dp *msm_dp_display) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; - dp = container_of(dp_display, struct dp_display_private, dp_display); + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - if (dp->dp_mode.out_fmt_is_yuv_420) + if (dp->msm_dp_mode.out_fmt_is_yuv_420) return false; return dp->wide_bus_supported; } -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp) +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *root, bool is_edp) { - struct dp_display_private *dp; + struct msm_dp_display_private *dp; struct device *dev; int rc; - dp = container_of(dp_display, struct dp_display_private, dp_display); - dev = &dp->dp_display.pdev->dev; + dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); + dev = 
&dp->msm_dp_display.pdev->dev; - rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp); + rc = msm_dp_debug_init(dev, dp->panel, dp->link, dp->msm_dp_display.connector, root, is_edp); if (rc) DRM_ERROR("failed to initialize debug, rc = %d\n", rc); } -int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, +int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev, struct drm_encoder *encoder, bool yuv_supported) { - struct dp_display_private *dp_priv; + struct msm_dp_display_private *msm_dp_priv; int ret; - dp_display->drm_dev = dev; + msm_dp_display->drm_dev = dev; - dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + msm_dp_priv = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); - ret = dp_bridge_init(dp_display, dev, encoder); + ret = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create dp bridge: %d\n", ret); return ret; } - dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported); - if (IS_ERR(dp_display->connector)) { - ret = PTR_ERR(dp_display->connector); + msm_dp_display->connector = msm_dp_drm_connector_init(msm_dp_display, encoder); + if (IS_ERR(msm_dp_display->connector)) { + ret = PTR_ERR(msm_dp_display->connector); DRM_DEV_ERROR(dev->dev, "failed to create dp connector: %d\n", ret); - dp_display->connector = NULL; + msm_dp_display->connector = NULL; return ret; } - dp_priv->panel->connector = dp_display->connector; + msm_dp_priv->panel->connector = msm_dp_display->connector; return 0; } -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; int rc = 0; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; u32 state; bool force_link_train = false; - dp_display = container_of(dp, struct dp_display_private, dp_display); - if (!dp_display->dp_mode.drm_mode.clock) { + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + if (!msm_dp_display->msm_dp_mode.drm_mode.clock) { DRM_ERROR("invalid params\n"); return; } if (dp->is_edp) - dp_hpd_plug_handle(dp_display, 0); + msm_dp_hpd_plug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); if (pm_runtime_resume_and_get(&dp->pdev->dev)) { DRM_ERROR("failed to pm_runtime_resume\n"); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - rc = dp_display_set_mode(dp, &dp_display->dp_mode); + rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode); if (rc) { DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); return; } - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISPLAY_OFF) { - dp_display_host_phy_init(dp_display); + msm_dp_display_host_phy_init(msm_dp_display); 
force_link_train = true; } - dp_display_enable(dp_display, force_link_train); + msm_dp_display_enable(msm_dp_display, force_link_train); - rc = dp_display_post_enable(dp); + rc = msm_dp_display_post_enable(dp); if (rc) { DRM_ERROR("DP display post enable failed, rc=%d\n", rc); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); } /* completed connection */ - dp_display->hpd_state = ST_CONNECTED; + msm_dp_display->hpd_state = ST_CONNECTED; drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); - dp_ctrl_push_idle(dp_display->ctrl); + msm_dp_ctrl_push_idle(msm_dp_display->ctrl); } -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; u32 state; - struct dp_display_private *dp_display; + struct msm_dp_display_private *msm_dp_display; - dp_display = container_of(dp, struct dp_display_private, dp_display); + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); if (dp->is_edp) - dp_hpd_unplug_handle(dp_display, 0); + msm_dp_hpd_unplug_handle(msm_dp_display, 0); - mutex_lock(&dp_display->event_mutex); + mutex_lock(&msm_dp_display->event_mutex); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", dp->connector_type, state); - dp_display_disable(dp_display); + msm_dp_display_disable(msm_dp_display); - state = dp_display->hpd_state; + state = msm_dp_display->hpd_state; if (state == ST_DISCONNECT_PENDING) { /* completed disconnection */ - dp_display->hpd_state = ST_DISCONNECTED; + msm_dp_display->hpd_state = ST_DISCONNECTED; } else { - dp_display->hpd_state = ST_DISPLAY_OFF; + msm_dp_display->hpd_state = ST_DISPLAY_OFF; } drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); pm_runtime_put_sync(&dp->pdev->dev); - mutex_unlock(&dp_display->event_mutex); + mutex_unlock(&msm_dp_display->event_mutex); } -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; - struct dp_display_private *dp_display; - struct dp_panel *dp_panel; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private 
*msm_dp_display; + struct msm_dp_panel *msm_dp_panel; - dp_display = container_of(dp, struct dp_display_private, dp_display); - dp_panel = dp_display->panel; + msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); + msm_dp_panel = msm_dp_display->panel; - memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + memset(&msm_dp_display->msm_dp_mode, 0x0, sizeof(struct msm_dp_display_mode)); - if (dp_display_check_video_test(dp)) - dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + if (msm_dp_display_check_video_test(dp)) + msm_dp_display->msm_dp_mode.bpp = msm_dp_display_get_test_bpp(dp); else /* Default num_components per pixel = 3 */ - dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + msm_dp_display->msm_dp_mode.bpp = dp->connector->display_info.bpc * 3; - if (!dp_display->dp_mode.bpp) - dp_display->dp_mode.bpp = 24; /* Default bpp */ + if (!msm_dp_display->msm_dp_mode.bpp) + msm_dp_display->msm_dp_mode.bpp = 24; /* Default bpp */ - drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + drm_mode_copy(&msm_dp_display->msm_dp_mode.drm_mode, adjusted_mode); - dp_display->dp_mode.v_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + msm_dp_display->msm_dp_mode.v_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); - dp_display->dp_mode.h_active_low = - !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); + msm_dp_display->msm_dp_mode.h_active_low = + !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); - dp_display->dp_mode.out_fmt_is_yuv_420 = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 = drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) && - dp_panel->vsc_sdp_supported; + msm_dp_panel->vsc_sdp_supported; /* populate wide_bus_support to different layers */ - dp_display->ctrl->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; - dp_display->catalog->wide_bus_en = - dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported; + msm_dp_display->ctrl->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported; + msm_dp_display->catalog->wide_bus_en = + msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? 
false : msm_dp_display->wide_bus_supported; } -void dp_bridge_hpd_enable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* * this is for external DP with hpd irq enabled case, - * step-1: dp_pm_runtime_resume() enable dp host only + * step-1: msm_dp_pm_runtime_resume() enable dp host only * step-2: enable hpd block and have hpd irq enabled here * step-3: waiting for plugin irq while phy is not initialized * step-4: DP PHY is initialized at plugin handler before link training * */ mutex_lock(&dp->event_mutex); - if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) { + if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) { DRM_ERROR("failed to resume power\n"); mutex_unlock(&dp->event_mutex); return; } - dp_catalog_ctrl_hpd_enable(dp->catalog); + msm_dp_catalog_ctrl_hpd_enable(dp->catalog); /* enable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); - dp_display->internal_hpd = true; + msm_dp_display->internal_hpd = true; mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_disable(struct drm_bridge *bridge) +void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); mutex_lock(&dp->event_mutex); /* disable HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); - dp_catalog_ctrl_hpd_disable(dp->catalog); + msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + msm_dp_catalog_ctrl_hpd_disable(dp->catalog); - dp_display->internal_hpd = false; + msm_dp_display->internal_hpd = false; - pm_runtime_put_sync(&dp_display->pdev->dev); + pm_runtime_put_sync(&msm_dp_display->pdev->dev); mutex_unlock(&dp->event_mutex); } -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status) { - struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); - struct msm_dp *dp_display = dp_bridge->dp_display; - struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge); + struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display; + struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display); /* Without next_bridge interrupts are handled by the DP core directly */ - if (dp_display->internal_hpd) + if (msm_dp_display->internal_hpd) return; - if (!dp_display->link_ready && status == connector_status_connected) - dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); - else if (dp_display->link_ready && 
status == connector_status_disconnected) - dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + if (!msm_dp_display->link_ready && status == connector_status_connected) + msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + else if (msm_dp_display->link_ready && status == connector_status_disconnected) + msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index ec7fa67e0569..ecbc2d92f546 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -27,18 +27,18 @@ struct msm_dp { hdmi_codec_plugged_cb plugged_cb; - struct dp_audio *dp_audio; + struct msm_dp_audio *msm_dp_audio; bool psr_supported; }; -int dp_display_set_plugged_cb(struct msm_dp *dp_display, +int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev); -int dp_display_get_modes(struct msm_dp *dp_display); -bool dp_display_check_video_test(struct msm_dp *dp_display); -int dp_display_get_test_bpp(struct msm_dp *dp_display); -void dp_display_signal_audio_start(struct msm_dp *dp_display); -void dp_display_signal_audio_complete(struct msm_dp *dp_display); -void dp_display_set_psr(struct msm_dp *dp, bool enter); -void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp); +int msm_dp_display_get_modes(struct msm_dp *msm_dp_display); +bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display); +int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display); +void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display); +void msm_dp_display_set_psr(struct msm_dp *dp, bool enter); +void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *dentry, bool is_edp); #endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 1b9be5bd97f1..d3e241ea6941 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -14,15 +14,15 @@ #include "dp_drm.h" /** - * dp_bridge_detect - callback to determine if connector is connected + * msm_dp_bridge_detect - callback to determine if connector is connected * @bridge: Pointer to drm bridge structure * Returns: Bridge's 'is connected' status */ -static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? "true" : "false"); @@ -31,14 +31,14 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) connector_status_disconnected; } -static int dp_bridge_atomic_check(struct drm_bridge *bridge, +static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct msm_dp *dp; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", (dp->link_ready) ? 
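
/*
 * Sketch (hypothetical names, not the driver's API): the hpd_notify handler
 * above reduces to a small decision table keyed on internal_hpd, link_ready
 * and the reported connector status.
 */
#include <stdbool.h>

enum sketch_ev { EV_NONE, EV_PLUG, EV_UNPLUG };

static enum sketch_ev hpd_event(bool internal_hpd, bool link_ready, bool connected)
{
    if (internal_hpd)                   /* DP core fields its own interrupts */
        return EV_NONE;
    if (!link_ready && connected)       /* sink appeared: queue a plug event */
        return EV_PLUG;
    if (link_ready && !connected)       /* link was up, sink gone: unplug */
        return EV_UNPLUG;
    return EV_NONE;
}
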
"true" : "false"); @@ -62,12 +62,12 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge, /** - * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() + * msm_dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() * @bridge: Poiner to drm bridge * @connector: Pointer to drm connector structure * Returns: Number of modes added */ -static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) +static int msm_dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { int rc = 0; struct msm_dp *dp; @@ -75,11 +75,11 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * if (!connector) return 0; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; /* pluggable case assumes EDID is read when HPD */ if (dp->link_ready) { - rc = dp_display_get_modes(dp); + rc = msm_dp_display_get_modes(dp); if (rc <= 0) { DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); return rc; @@ -90,37 +90,37 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * return rc; } -static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, false); + msm_dp_display_debugfs_init(dp, root, false); } -static const struct drm_bridge_funcs dp_bridge_ops = { +static const struct drm_bridge_funcs msm_dp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_enable = dp_bridge_atomic_enable, - .atomic_disable = dp_bridge_atomic_disable, - .atomic_post_disable = dp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = dp_bridge_mode_valid, - .get_modes = dp_bridge_get_modes, - .detect = dp_bridge_detect, - .atomic_check = dp_bridge_atomic_check, - .hpd_enable = dp_bridge_hpd_enable, - .hpd_disable = dp_bridge_hpd_disable, - .hpd_notify = dp_bridge_hpd_notify, - .debugfs_init = dp_bridge_debugfs_init, + .atomic_enable = msm_dp_bridge_atomic_enable, + .atomic_disable = msm_dp_bridge_atomic_disable, + .atomic_post_disable = msm_dp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_dp_bridge_mode_valid, + .get_modes = msm_dp_bridge_get_modes, + .detect = msm_dp_bridge_detect, + .atomic_check = msm_dp_bridge_atomic_check, + .hpd_enable = msm_dp_bridge_hpd_enable, + .hpd_disable = msm_dp_bridge_hpd_disable, + .hpd_notify = msm_dp_bridge_hpd_notify, + .debugfs_init = msm_dp_bridge_debugfs_init, }; -static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, +static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(drm_bridge)->msm_dp_display; if (WARN_ON(!conn_state)) return -ENODEV; @@ -136,18 +136,18 @@ static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, return 0; } -static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state 
*old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; /* * Check the old state of the crtc to determine if the panel - * was put into psr state previously by the edp_bridge_atomic_disable. + * was put into psr state previously by the msm_edp_bridge_atomic_disable. * If the panel is in psr, just exit psr state and skip the full * bridge enable sequence. */ @@ -159,21 +159,21 @@ static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); if (old_crtc_state && old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } - dp_bridge_atomic_enable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL; - struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); - struct msm_dp *dp = dp_bridge->dp_display; + struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = msm_dp_bridge->msm_dp_display; crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, drm_bridge->encoder); @@ -194,24 +194,24 @@ static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, * If old crtc state is active, then this is a display disable * call while the sink is in psr state. So, exit psr here. * The eDP controller will be disabled in the - * edp_bridge_atomic_post_disable function. + * msm_edp_bridge_atomic_post_disable function. * * We observed sink is stuck in self refresh if psr exit is skipped * when display disable occurs while the sink is in psr state. */ if (new_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, true); + msm_dp_display_set_psr(dp, true); return; } else if (old_crtc_state->self_refresh_active) { - dp_display_set_psr(dp, false); + msm_dp_display_set_psr(dp, false); return; } out: - dp_bridge_atomic_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state); } -static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *atomic_state = old_bridge_state->base.state; @@ -228,29 +228,29 @@ static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, return; /* - * Self refresh mode is already set in edp_bridge_atomic_disable. + * Self refresh mode is already set in msm_edp_bridge_atomic_disable. 
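
/*
 * Sketch: the eDP enable/disable paths above key entirely off
 * crtc_state->self_refresh_active; "enter"/"exit" stand for the two
 * msm_dp_display_set_psr() calls, everything else here is hypothetical.
 */
#include <stdbool.h>

enum psr_action { PSR_ENTER, PSR_EXIT, FULL_SEQUENCE };

/* disable path: entering self refresh -> PSR enter only; leaving it -> PSR
 * exit only; otherwise run the full bridge disable sequence */
static enum psr_action on_disable(bool old_self_refresh, bool new_self_refresh)
{
    if (new_self_refresh)
        return PSR_ENTER;
    if (old_self_refresh)
        return PSR_EXIT;
    return FULL_SEQUENCE;
}

/* enable path: a panel parked in PSR by a prior disable only needs a PSR
 * exit; the full enable sequence is skipped */
static enum psr_action on_enable(bool old_self_refresh)
{
    return old_self_refresh ? PSR_EXIT : FULL_SEQUENCE;
}
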
*/ if (new_crtc_state->self_refresh_active) return; - dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); + msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); } /** - * edp_bridge_mode_valid - callback to determine if specified mode is valid + * msm_edp_bridge_mode_valid - callback to determine if specified mode is valid * @bridge: Pointer to drm bridge structure * @info: display info * @mode: Pointer to drm mode structure * Returns: Validity status for specified mode */ -static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, +static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct msm_dp *dp; int mode_pclk_khz = mode->clock; - dp = to_dp_bridge(bridge)->dp_display; + dp = to_dp_bridge(bridge)->msm_dp_display; if (!dp || !mode_pclk_khz || !dp->connector) { DRM_ERROR("invalid params\n"); @@ -268,42 +268,43 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +static void msm_edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) { - struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display; - dp_display_debugfs_init(dp, root, true); + msm_dp_display_debugfs_init(dp, root, true); } -static const struct drm_bridge_funcs edp_bridge_ops = { - .atomic_enable = edp_bridge_atomic_enable, - .atomic_disable = edp_bridge_atomic_disable, - .atomic_post_disable = edp_bridge_atomic_post_disable, - .mode_set = dp_bridge_mode_set, - .mode_valid = edp_bridge_mode_valid, +static const struct drm_bridge_funcs msm_edp_bridge_ops = { + .atomic_enable = msm_edp_bridge_atomic_enable, + .atomic_disable = msm_edp_bridge_atomic_disable, + .atomic_post_disable = msm_edp_bridge_atomic_post_disable, + .mode_set = msm_dp_bridge_mode_set, + .mode_valid = msm_edp_bridge_mode_valid, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_check = edp_bridge_atomic_check, - .debugfs_init = edp_bridge_debugfs_init, + .atomic_check = msm_edp_bridge_atomic_check, + .debugfs_init = msm_edp_bridge_debugfs_init, }; -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder) +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, bool yuv_supported) { int rc; - struct msm_dp_bridge *dp_bridge; + struct msm_dp_bridge *msm_dp_bridge; struct drm_bridge *bridge; - dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL); - if (!dp_bridge) + msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL); + if (!msm_dp_bridge) return -ENOMEM; - dp_bridge->dp_display = dp_display; + msm_dp_bridge->msm_dp_display = msm_dp_display; - bridge = &dp_bridge->bridge; - bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops; - bridge->type = dp_display->connector_type; + bridge = &msm_dp_bridge->bridge; + bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops; + bridge->type = msm_dp_display->connector_type; + bridge->ycbcr_420_allowed = yuv_supported; /* * Many ops only make sense for DP. Why? 
@@ -316,7 +317,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, * allows the panel driver to properly power itself on to read the * modes. */ - if (!dp_display->is_edp) { + if (!msm_dp_display->is_edp) { bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HPD | @@ -337,9 +338,9 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, return rc; } - if (dp_display->next_bridge) { + if (msm_dp_display->next_bridge) { rc = drm_bridge_attach(encoder, - dp_display->next_bridge, bridge, + msm_dp_display->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (rc < 0) { DRM_ERROR("failed to attach panel bridge: %d\n", rc); @@ -351,21 +352,18 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, } /* connector initialization */ -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported) +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder) { struct drm_connector *connector = NULL; - connector = drm_bridge_connector_init(dp_display->drm_dev, encoder); + connector = drm_bridge_connector_init(msm_dp_display->drm_dev, encoder); if (IS_ERR(connector)) return connector; - if (!dp_display->is_edp) + if (!msm_dp_display->is_edp) drm_connector_attach_dp_subconnector_property(connector); - if (yuv_supported) - connector->ycbcr_420_allowed = true; - drm_connector_attach_encoder(connector, encoder); return connector; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h index 45e57ac25a4d..8eae2f74839f 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.h +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -14,31 +14,32 @@ struct msm_dp_bridge { struct drm_bridge bridge; - struct msm_dp *dp_display; + struct msm_dp *msm_dp_display; }; #define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge) -struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder, - bool yuv_supported); -int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, - struct drm_encoder *encoder); +struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display, + struct drm_encoder *encoder); +int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, + struct drm_encoder *encoder, + bool yuv_supported); -void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, +void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, struct drm_bridge_state *old_bridge_state); -enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, +enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode); -void dp_bridge_mode_set(struct drm_bridge *drm_bridge, +void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode); -void dp_bridge_hpd_enable(struct drm_bridge *bridge); -void dp_bridge_hpd_disable(struct drm_bridge *bridge); -void dp_bridge_hpd_notify(struct drm_bridge *bridge, +void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge); +void 
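
/*
 * Sketch: to_dp_bridge() above is the usual container_of() pattern -- the
 * driver embeds struct drm_bridge in its own struct and recovers the outer
 * object from the embedded pointer. A self-contained userspace version:
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int dummy; };

struct outer {
    int priv;
    struct inner bridge;    /* embedded, like drm_bridge in msm_dp_bridge */
};

int main(void)
{
    struct outer o = { .priv = 42 };
    struct inner *i = &o.bridge;

    assert(container_of(i, struct outer, bridge) == &o);
    return 0;
}
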
msm_dp_bridge_hpd_disable(struct drm_bridge *bridge); +void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status); #endif /* _DP_DRM_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index d8967615d84d..1a1fbb2d7d4f 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -28,25 +28,25 @@ enum audio_pattern_type { AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, }; -struct dp_link_request { +struct msm_dp_link_request { u32 test_requested; u32 test_link_rate; u32 test_lane_count; }; -struct dp_link_private { +struct msm_dp_link_private { u32 prev_sink_count; struct drm_device *drm_dev; struct drm_dp_aux *aux; - struct dp_link dp_link; + struct msm_dp_link msm_dp_link; - struct dp_link_request request; + struct msm_dp_link_request request; struct mutex psm_mutex; u8 link_status[DP_LINK_STATUS_SIZE]; }; -static int dp_aux_link_power_up(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_up(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; ssize_t len; @@ -73,8 +73,8 @@ static int dp_aux_link_power_up(struct drm_dp_aux *aux, return 0; } -static int dp_aux_link_power_down(struct drm_dp_aux *aux, - struct dp_link_info *link) +static int msm_dp_aux_link_power_down(struct drm_dp_aux *aux, + struct msm_dp_link_info *link) { u8 value; int err; @@ -96,7 +96,7 @@ static int dp_aux_link_power_down(struct drm_dp_aux *aux, return 0; } -static int dp_link_get_period(struct dp_link_private *link, int const addr) +static int msm_dp_link_get_period(struct msm_dp_link_private *link, int const addr) { int ret = 0; u8 data; @@ -122,19 +122,19 @@ exit: return ret; } -static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +static int msm_dp_link_parse_audio_channel_period(struct msm_dp_link_private *link) { int ret = 0; - struct dp_link_test_audio *req = &link->dp_link.test_audio; + struct msm_dp_link_test_audio *req = &link->msm_dp_link.test_audio; - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_1 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); if (ret == -EINVAL) goto exit; @@ -142,42 +142,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret); /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_3 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_4 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_5 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); if (ret == -EINVAL) 
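
/*
 * Sketch: the power_up/power_down helpers above are read-modify-write
 * cycles on the DPCD DP_SET_POWER register. Their bodies are elided in the
 * hunks, so only the bit manipulation is shown; the register values follow
 * the DisplayPort spec, and the AUX I/O and post-wake settle delay are
 * omitted.
 */
#define SET_POWER_MASK 0x3              /* DP_SET_POWER_MASK */
#define SET_POWER_D0   0x1              /* full power */
#define SET_POWER_D3   0x2              /* power down */

static unsigned char set_power_value(unsigned char value, int wake)
{
    value &= ~SET_POWER_MASK;
    value |= wake ? SET_POWER_D0 : SET_POWER_D3;
    return value;
}
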
goto exit; req->test_audio_period_ch_6 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); if (ret == -EINVAL) goto exit; req->test_audio_period_ch_7 = ret; drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret); - ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); if (ret == -EINVAL) goto exit; @@ -187,7 +187,7 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_type(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -208,13 +208,13 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_pattern_type = data; + link->msm_dp_link.test_audio.test_audio_pattern_type = data; drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data); exit: return ret; } -static int dp_link_parse_audio_mode(struct dp_link_private *link) +static int msm_dp_link_parse_audio_mode(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -248,8 +248,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link) goto exit; } - link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; - link->dp_link.test_audio.test_audio_channel_count = channel_count; + link->msm_dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->msm_dp_link.test_audio.test_audio_channel_count = channel_count; drm_dbg_dp(link->drm_dev, "sampling_rate = 0x%x, channel_count = 0x%x\n", sampling_rate, channel_count); @@ -257,25 +257,25 @@ exit: return ret; } -static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_audio_pattern_params(struct msm_dp_link_private *link) { int ret = 0; - ret = dp_link_parse_audio_mode(link); + ret = msm_dp_link_parse_audio_mode(link); if (ret) goto exit; - ret = dp_link_parse_audio_pattern_type(link); + ret = msm_dp_link_parse_audio_pattern_type(link); if (ret) goto exit; - ret = dp_link_parse_audio_channel_period(link); + ret = msm_dp_link_parse_audio_channel_period(link); exit: return ret; } -static bool dp_link_is_video_pattern_valid(u32 pattern) +static bool msm_dp_link_is_video_pattern_valid(u32 pattern) { switch (pattern) { case DP_NO_TEST_PATTERN: @@ -289,12 +289,12 @@ static bool dp_link_is_video_pattern_valid(u32 pattern) } /** - * dp_link_is_bit_depth_valid() - validates the bit depth requested + * msm_dp_link_is_bit_depth_valid() - validates the bit depth requested * @tbd: bit depth requested by the sink * * Returns true if the requested bit depth is supported. 
*/ -static bool dp_link_is_bit_depth_valid(u32 tbd) +static bool msm_dp_link_is_bit_depth_valid(u32 tbd) { /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ switch (tbd) { @@ -307,7 +307,7 @@ static bool dp_link_is_bit_depth_valid(u32 tbd) } } -static int dp_link_parse_timing_params1(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params1(struct msm_dp_link_private *link, int addr, int len, u32 *val) { u8 bp[2]; @@ -328,7 +328,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params2(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params2(struct msm_dp_link_private *link, int addr, int len, u32 *val1, u32 *val2) { @@ -351,7 +351,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link, return 0; } -static int dp_link_parse_timing_params3(struct dp_link_private *link, +static int msm_dp_link_parse_timing_params3(struct msm_dp_link_private *link, int addr, u32 *val) { u8 bp; @@ -369,13 +369,13 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link, } /** - * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD + * msm_dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the video link pattern and the link * bit depth requested by the sink, and if the values parsed are valid. */ -static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +static int msm_dp_link_parse_video_pattern_params(struct msm_dp_link_private *link) { int ret = 0; ssize_t rlen; @@ -388,13 +388,13 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) return rlen; } - if (!dp_link_is_video_pattern_valid(bp)) { + if (!msm_dp_link_is_video_pattern_valid(bp)) { DRM_ERROR("invalid link video pattern = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_video_pattern = bp; + link->msm_dp_link.test_video.test_video_pattern = bp; /* Read the requested color bit depth and dynamic range (Byte 0x232) */ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp); @@ -404,88 +404,88 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) } /* Dynamic Range */ - link->dp_link.test_video.test_dyn_range = + link->msm_dp_link.test_video.test_dyn_range = (bp & DP_TEST_DYNAMIC_RANGE_CEA); /* Color bit depth */ bp &= DP_TEST_BIT_DEPTH_MASK; - if (!dp_link_is_bit_depth_valid(bp)) { + if (!msm_dp_link_is_bit_depth_valid(bp)) { DRM_ERROR("invalid link bit depth = 0x%x\n", bp); ret = -EINVAL; return ret; } - link->dp_link.test_video.test_bit_depth = bp; + link->msm_dp_link.test_video.test_bit_depth = bp; /* resolution timing params */ - ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, - &link->dp_link.test_video.test_h_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_h_total); if (ret) { DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, - &link->dp_link.test_video.test_v_total); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->msm_dp_link.test_video.test_v_total); if (ret) { DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, - &link->dp_link.test_video.test_h_start); + ret = 
msm_dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->msm_dp_link.test_video.test_h_start); if (ret) { DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, - &link->dp_link.test_video.test_v_start); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->msm_dp_link.test_video.test_v_start); if (ret) { DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, - &link->dp_link.test_video.test_hsync_pol, - &link->dp_link.test_video.test_hsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->msm_dp_link.test_video.test_hsync_pol, + &link->msm_dp_link.test_video.test_hsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, - &link->dp_link.test_video.test_vsync_pol, - &link->dp_link.test_video.test_vsync_width); + ret = msm_dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->msm_dp_link.test_video.test_vsync_pol, + &link->msm_dp_link.test_video.test_vsync_width); if (ret) { DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, - &link->dp_link.test_video.test_h_width); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->msm_dp_link.test_video.test_h_width); if (ret) { DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); return ret; } - ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, - &link->dp_link.test_video.test_v_height); + ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->msm_dp_link.test_video.test_v_height); if (ret) { DRM_ERROR("failed to parse test_v_height\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, - &link->dp_link.test_video.test_rr_d); - link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->msm_dp_link.test_video.test_rr_d); + link->msm_dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; if (ret) { DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); return ret; } - ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, - &link->dp_link.test_video.test_rr_n); + ret = msm_dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->msm_dp_link.test_video.test_rr_n); if (ret) { DRM_ERROR("failed to parse test_rr_n\n"); return ret; @@ -505,34 +505,34 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link) "TEST_V_HEIGHT = %d\n" "TEST_REFRESH_DENOMINATOR = %d\n" "TEST_REFRESH_NUMERATOR = %d\n", - link->dp_link.test_video.test_video_pattern, - link->dp_link.test_video.test_dyn_range, - link->dp_link.test_video.test_bit_depth, - link->dp_link.test_video.test_h_total, - link->dp_link.test_video.test_v_total, - link->dp_link.test_video.test_h_start, - link->dp_link.test_video.test_v_start, - link->dp_link.test_video.test_hsync_pol, - link->dp_link.test_video.test_hsync_width, - link->dp_link.test_video.test_vsync_pol, - link->dp_link.test_video.test_vsync_width, - link->dp_link.test_video.test_h_width, - link->dp_link.test_video.test_v_height, - link->dp_link.test_video.test_rr_d, - link->dp_link.test_video.test_rr_n); + link->msm_dp_link.test_video.test_video_pattern, + 
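
/*
 * Sketch: each timing field above is read as two DPCD bytes; the *_HI
 * register names imply the high byte comes first. The composition below is
 * an inference from that naming, not a copy of the (elided) helper body.
 */
static unsigned int dpcd_pair_to_u16(const unsigned char bp[2])
{
    return ((unsigned int)bp[0] << 8) | bp[1];  /* HI byte, then LO byte */
}
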
link->msm_dp_link.test_video.test_dyn_range, + link->msm_dp_link.test_video.test_bit_depth, + link->msm_dp_link.test_video.test_h_total, + link->msm_dp_link.test_video.test_v_total, + link->msm_dp_link.test_video.test_h_start, + link->msm_dp_link.test_video.test_v_start, + link->msm_dp_link.test_video.test_hsync_pol, + link->msm_dp_link.test_video.test_hsync_width, + link->msm_dp_link.test_video.test_vsync_pol, + link->msm_dp_link.test_video.test_vsync_width, + link->msm_dp_link.test_video.test_h_width, + link->msm_dp_link.test_video.test_v_height, + link->msm_dp_link.test_video.test_rr_d, + link->msm_dp_link.test_video.test_rr_n); return ret; } /** - * dp_link_parse_link_training_params() - parses link training parameters from + * msm_dp_link_parse_link_training_params() - parses link training parameters from * DPCD * @link: Display Port Driver data * * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane * count (Byte 0x220), and if these parsed values are valid. */ -static int dp_link_parse_link_training_params(struct dp_link_private *link) +static int msm_dp_link_parse_link_training_params(struct msm_dp_link_private *link) { u8 bp; ssize_t rlen; @@ -571,13 +571,13 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link) } /** - * dp_link_parse_phy_test_params() - parses the phy link parameters + * msm_dp_link_parse_phy_test_params() - parses the phy link parameters * @link: Display Port Driver data * * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being * requested. */ -static int dp_link_parse_phy_test_params(struct dp_link_private *link) +static int msm_dp_link_parse_phy_test_params(struct msm_dp_link_private *link) { u8 data; ssize_t rlen; @@ -589,7 +589,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) return rlen; } - link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07; + link->msm_dp_link.phy_params.phy_test_pattern_sel = data & 0x07; drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data); @@ -608,12 +608,12 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link) } /** - * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * msm_dp_link_is_video_audio_test_requested() - checks for audio/video link request * @link: link requested by the sink * * Returns true if the requested link is a permitted audio/video link. */ -static bool dp_link_is_video_audio_test_requested(u32 link) +static bool msm_dp_link_is_video_audio_test_requested(u32 link) { u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN | DP_TEST_LINK_AUDIO_PATTERN | @@ -624,13 +624,13 @@ static bool dp_link_is_video_audio_test_requested(u32 link) } /** - * dp_link_parse_request() - parses link request parameters from sink + * msm_dp_link_parse_request() - parses link request parameters from sink * @link: Display Port Driver data * * Parses the DPCD to check if an automated link is requested (Byte 0x201), * and what type of link automation is being requested (Byte 0x218). 
*/ -static int dp_link_parse_request(struct dp_link_private *link) +static int msm_dp_link_parse_request(struct msm_dp_link_private *link) { int ret = 0; u8 data; @@ -672,27 +672,27 @@ static int dp_link_parse_request(struct dp_link_private *link) drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data); link->request.test_requested = data; if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { - ret = dp_link_parse_phy_test_params(link); + ret = msm_dp_link_parse_phy_test_params(link); if (ret) goto end; - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } if (link->request.test_requested == DP_TEST_LINK_TRAINING) { - ret = dp_link_parse_link_training_params(link); + ret = msm_dp_link_parse_link_training_params(link); if (ret) goto end; } - if (dp_link_is_video_audio_test_requested( + if (msm_dp_link_is_video_audio_test_requested( link->request.test_requested)) { - ret = dp_link_parse_video_pattern_params(link); + ret = msm_dp_link_parse_video_pattern_params(link); if (ret) goto end; - ret = dp_link_parse_audio_pattern_params(link); + ret = msm_dp_link_parse_audio_pattern_params(link); } end: /* @@ -700,29 +700,29 @@ end: * a DP_TEST_NAK. */ if (ret) { - link->dp_link.test_response = DP_TEST_NAK; + link->msm_dp_link.test_response = DP_TEST_NAK; } else { if (link->request.test_requested != DP_TEST_LINK_EDID_READ) - link->dp_link.test_response = DP_TEST_ACK; + link->msm_dp_link.test_response = DP_TEST_ACK; else - link->dp_link.test_response = + link->msm_dp_link.test_response = DP_TEST_EDID_CHECKSUM_WRITE; } return ret; } -static int dp_link_parse_sink_status_field(struct dp_link_private *link) +static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { int len; - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; len = drm_dp_read_sink_count(link->aux); if (len < 0) { DRM_ERROR("DP parse sink count failed\n"); return len; } - link->dp_link.sink_count = len; + link->msm_dp_link.sink_count = len; len = drm_dp_dpcd_read_link_status(link->aux, link->link_status); @@ -731,11 +731,11 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) return len; } - return dp_link_parse_request(link); + return msm_dp_link_parse_request(link); } /** - * dp_link_process_link_training_request() - processes new training requests + * msm_dp_link_process_link_training_request() - processes new training requests * @link: Display Port link data * * This function will handle new link training requests that are initiated by @@ -745,7 +745,7 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link) * The function will return 0 if a link training request has been processed, * otherwise it will return -EINVAL. 
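
/*
 * Sketch: the response selection at the end of parse_request() above, using
 * the standard DP_TEST_RESPONSE bit values from the DPCD definitions.
 */
#include <stdbool.h>

#define TEST_ACK                 (1 << 0)   /* DP_TEST_ACK */
#define TEST_NAK                 (1 << 1)   /* DP_TEST_NAK */
#define TEST_EDID_CHECKSUM_WRITE (1 << 2)   /* DP_TEST_EDID_CHECKSUM_WRITE */

static unsigned char pick_test_response(int err, bool edid_read)
{
    if (err)                            /* any parse failure is NAKed */
        return TEST_NAK;
    return edid_read ? TEST_EDID_CHECKSUM_WRITE : TEST_ACK;
}
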
*/ -static int dp_link_process_link_training_request(struct dp_link_private *link) +static int msm_dp_link_process_link_training_request(struct msm_dp_link_private *link) { if (link->request.test_requested != DP_TEST_LINK_TRAINING) return -EINVAL; @@ -756,49 +756,49 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); return 0; } -bool dp_link_send_test_response(struct dp_link *dp_link) +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE, - dp_link->test_response); + msm_dp_link->test_response); return ret == 1; } -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable) +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid params\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); mutex_lock(&link->psm_mutex); if (enable) - ret = dp_aux_link_power_down(link->aux, link_info); + ret = msm_dp_aux_link_power_down(link->aux, link_info); else - ret = dp_aux_link_power_up(link->aux, link_info); + ret = msm_dp_aux_link_power_up(link->aux, link_info); if (ret) DRM_ERROR("Failed to %s low power mode\n", enable ? 
@@ -808,24 +808,24 @@ int dp_link_psm_config(struct dp_link *dp_link, return ret; } -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum) { - struct dp_link_private *link = NULL; + struct msm_dp_link_private *link = NULL; int ret = 0; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return false; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM, checksum); return ret == 1; } -static void dp_link_parse_vx_px(struct dp_link_private *link) +static void msm_dp_link_parse_vx_px(struct msm_dp_link_private *link) { drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n", drm_dp_get_adjust_request_voltage(link->link_status, 0), @@ -845,31 +845,31 @@ static void dp_link_parse_vx_px(struct dp_link_private *link) */ drm_dbg_dp(link->drm_dev, "Current: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); - link->dp_link.phy_params.v_level = + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level = drm_dp_get_adjust_request_voltage(link->link_status, 0); - link->dp_link.phy_params.p_level = + link->msm_dp_link.phy_params.p_level = drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); - link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + link->msm_dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; drm_dbg_dp(link->drm_dev, "Requested: v_level = 0x%x, p_level = 0x%x\n", - link->dp_link.phy_params.v_level, - link->dp_link.phy_params.p_level); + link->msm_dp_link.phy_params.v_level, + link->msm_dp_link.phy_params.p_level); } /** - * dp_link_process_phy_test_pattern_request() - process new phy link requests + * msm_dp_link_process_phy_test_pattern_request() - process new phy link requests * @link: Display Port Driver data * * This function will handle new phy link pattern requests that are initiated * by the sink. The function will return 0 if a phy link pattern has been * processed, otherwise it will return -EINVAL. 
*/ -static int dp_link_process_phy_test_pattern_request( - struct dp_link_private *link) +static int msm_dp_link_process_phy_test_pattern_request( + struct msm_dp_link_private *link) { if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { drm_dbg_dp(link->drm_dev, "no phy test\n"); @@ -886,24 +886,24 @@ static int dp_link_process_phy_test_pattern_request( drm_dbg_dp(link->drm_dev, "Current: rate = 0x%x, lane count = 0x%x\n", - link->dp_link.link_params.rate, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.rate, + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "Requested: rate = 0x%x, lane count = 0x%x\n", link->request.test_link_rate, link->request.test_lane_count); - link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count; + link->msm_dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); - dp_link_parse_vx_px(link); + msm_dp_link_parse_vx_px(link); return 0; } -static bool dp_link_read_psr_error_status(struct dp_link_private *link) +static bool msm_dp_link_read_psr_error_status(struct msm_dp_link_private *link) { u8 status; @@ -921,7 +921,7 @@ static bool dp_link_read_psr_error_status(struct dp_link_private *link) return true; } -static bool dp_link_psr_capability_changed(struct dp_link_private *link) +static bool msm_dp_link_psr_capability_changed(struct msm_dp_link_private *link) { u8 status; @@ -941,7 +941,7 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) } /** - * dp_link_process_link_status_update() - processes link status updates + * msm_dp_link_process_link_status_update() - processes link status updates * @link: Display Port link module data * * This function will check for changes in the link status, e.g. clock @@ -951,13 +951,13 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) * The function will return 0 if a link status update has been processed, * otherwise it will return -EINVAL. */ -static int dp_link_process_link_status_update(struct dp_link_private *link) +static int msm_dp_link_process_link_status_update(struct msm_dp_link_private *link) { bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes); + link->msm_dp_link.link_params.num_lanes); drm_dbg_dp(link->drm_dev, "channel_eq_done = %d, clock_recovery_done = %d\n", @@ -970,7 +970,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) } /** - * dp_link_process_ds_port_status_change() - process port status changes + * msm_dp_link_process_ds_port_status_change() - process port status changes * @link: Display Port Driver data * * This function will handle downstream port updates that are initiated by @@ -980,122 +980,122 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) * The function will return 0 if a downstream port update has been * processed, otherwise it will return -EINVAL. 
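
/*
 * Sketch: per the doc comment above, process_link_status_update() returns 0
 * ("handled") only when retraining is actually needed; the partly elided
 * check presumably flags an update only when clock recovery or channel EQ
 * has been lost.
 */
#include <errno.h>
#include <stdbool.h>

static int status_update_needed(bool clock_recovery_done, bool channel_eq_done)
{
    if (channel_eq_done && clock_recovery_done)
        return -EINVAL;                 /* link still good: nothing to do */
    return 0;                           /* caller sets DP_LINK_STATUS_UPDATED */
}
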
*/ -static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +static int msm_dp_link_process_ds_port_status_change(struct msm_dp_link_private *link) { if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & DP_DOWNSTREAM_PORT_STATUS_CHANGED) goto reset; - if (link->prev_sink_count == link->dp_link.sink_count) + if (link->prev_sink_count == link->msm_dp_link.sink_count) return -EINVAL; reset: /* reset prev_sink_count */ - link->prev_sink_count = link->dp_link.sink_count; + link->prev_sink_count = link->msm_dp_link.sink_count; return 0; } -static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_video_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) && !(link->request.test_requested & DP_TEST_LINK_AUDIO_DISABLED_VIDEO); } -static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +static bool msm_dp_link_is_audio_pattern_requested(struct msm_dp_link_private *link) { return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); } -static void dp_link_reset_data(struct dp_link_private *link) +static void msm_dp_link_reset_data(struct msm_dp_link_private *link) { - link->request = (const struct dp_link_request){ 0 }; - link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; - link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; - link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; - link->dp_link.phy_params.phy_test_pattern_sel = 0; - link->dp_link.sink_request = 0; - link->dp_link.test_response = 0; + link->request = (const struct msm_dp_link_request){ 0 }; + link->msm_dp_link.test_video = (const struct msm_dp_link_test_video){ 0 }; + link->msm_dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->msm_dp_link.test_audio = (const struct msm_dp_link_test_audio){ 0 }; + link->msm_dp_link.phy_params.phy_test_pattern_sel = 0; + link->msm_dp_link.sink_request = 0; + link->msm_dp_link.test_response = 0; } /** - * dp_link_process_request() - handle HPD IRQ transition to HIGH - * @dp_link: pointer to link module data + * msm_dp_link_process_request() - handle HPD IRQ transition to HIGH + * @msm_dp_link: pointer to link module data * * This function will handle the HPD IRQ state transitions from LOW to HIGH * (including cases when there are back to back HPD IRQ HIGH) indicating * the start of a new link training request or sink status update. 
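
/*
 * Sketch: the downstream-port check above collapses to "handle the event if
 * the status-changed bit is set or the sink count moved, and re-sync the
 * cached count on the handled path".
 */
#include <errno.h>
#include <stdbool.h>

static int ds_port_changed(unsigned int *prev_sink_count,
                           unsigned int sink_count, bool status_changed_bit)
{
    if (!status_changed_bit && *prev_sink_count == sink_count)
        return -EINVAL;                 /* nothing changed */

    *prev_sink_count = sink_count;      /* reset prev_sink_count */
    return 0;
}
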
*/ -int dp_link_process_request(struct dp_link *dp_link) +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link) { int ret = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); - dp_link_reset_data(link); + msm_dp_link_reset_data(link); - ret = dp_link_parse_sink_status_field(link); + ret = msm_dp_link_parse_sink_status_field(link); if (ret) return ret; if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { - dp_link->sink_request |= DP_TEST_LINK_EDID_READ; - } else if (!dp_link_process_ds_port_status_change(link)) { - dp_link->sink_request |= DS_PORT_STATUS_CHANGED; - } else if (!dp_link_process_link_training_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_TRAINING; - } else if (!dp_link_process_phy_test_pattern_request(link)) { - dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; - } else if (dp_link_read_psr_error_status(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + } else if (!msm_dp_link_process_ds_port_status_change(link)) { + msm_dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + } else if (!msm_dp_link_process_link_training_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_TRAINING; + } else if (!msm_dp_link_process_phy_test_pattern_request(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + } else if (msm_dp_link_read_psr_error_status(link)) { DRM_ERROR("PSR IRQ_HPD received\n"); - } else if (dp_link_psr_capability_changed(link)) { + } else if (msm_dp_link_psr_capability_changed(link)) { drm_dbg_dp(link->drm_dev, "PSR Capability changed\n"); } else { - ret = dp_link_process_link_status_update(link); + ret = msm_dp_link_process_link_status_update(link); if (!ret) { - dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + msm_dp_link->sink_request |= DP_LINK_STATUS_UPDATED; } else { - if (dp_link_is_video_pattern_requested(link)) { + if (msm_dp_link_is_video_pattern_requested(link)) { ret = 0; - dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + msm_dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; } - if (dp_link_is_audio_pattern_requested(link)) { - dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + if (msm_dp_link_is_audio_pattern_requested(link)) { + msm_dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; ret = -EINVAL; } } } drm_dbg_dp(link->drm_dev, "sink request=%#x\n", - dp_link->sink_request); + msm_dp_link->sink_request); return ret; } -int dp_link_get_colorimetry_config(struct dp_link *dp_link) +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link) { u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Unless a video pattern CTS test is ongoing, use RGB_VESA * Only RGB_VESA and RGB_CEA supported for now */ - if (dp_link_is_video_pattern_requested(link)) { - if (link->dp_link.test_video.test_dyn_range & + if (msm_dp_link_is_video_pattern_requested(link)) { + if (link->msm_dp_link.test_video.test_dyn_range & DP_TEST_DYNAMIC_RANGE_CEA) cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; } @@ -1103,22 +1103,22 @@ int 
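
/*
 * Sketch: process_request() above classifies a sink IRQ in strict priority
 * order; only the first match wins. Names are shortened and each check is
 * reduced to a boolean for illustration.
 */
#include <stdbool.h>

struct irq_checks {
    bool edid_read, ds_port_change, link_training, phy_pattern;
    bool psr_error, psr_cap_change, status_update;
};

static const char *classify(const struct irq_checks *c)
{
    if (c->edid_read)      return "DP_TEST_LINK_EDID_READ";
    if (c->ds_port_change) return "DS_PORT_STATUS_CHANGED";
    if (c->link_training)  return "DP_TEST_LINK_TRAINING";
    if (c->phy_pattern)    return "DP_TEST_LINK_PHY_TEST_PATTERN";
    if (c->psr_error)      return "PSR error (logged only)";
    if (c->psr_cap_change) return "PSR capability change (logged only)";
    if (c->status_update)  return "DP_LINK_STATUS_UPDATED";
    return "video/audio pattern test, or nothing";
}
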
dp_link_get_colorimetry_config(struct dp_link *dp_link) return cc; } -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status) { int i; u8 max_p_level; int v_max = 0, p_max = 0; - struct dp_link_private *link; + struct msm_dp_link_private *link; - if (!dp_link) { + if (!msm_dp_link) { DRM_ERROR("invalid input\n"); return -EINVAL; } - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* use the max level across lanes */ - for (i = 0; i < dp_link->link_params.num_lanes; i++) { + for (i = 0; i < msm_dp_link->link_params.num_lanes; i++) { u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, i); @@ -1131,56 +1131,56 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) p_max = data_p; } - dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; - dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + msm_dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + msm_dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; /** * Adjust the voltage swing and pre-emphasis level combination to within * the allowable range. */ - if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested vSwingLevel=%d, change to %d\n", - dp_link->phy_params.v_level, + msm_dp_link->phy_params.v_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; } - if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { + if (msm_dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, DP_TRAIN_LEVEL_MAX); - dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; + msm_dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; } - max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level; - if (dp_link->phy_params.p_level > max_p_level) { + max_p_level = DP_TRAIN_LEVEL_MAX - msm_dp_link->phy_params.v_level; + if (msm_dp_link->phy_params.p_level > max_p_level) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", - dp_link->phy_params.p_level, + msm_dp_link->phy_params.p_level, max_p_level); - dp_link->phy_params.p_level = max_p_level; + msm_dp_link->phy_params.p_level = max_p_level; } drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n", - dp_link->phy_params.v_level, dp_link->phy_params.p_level); + msm_dp_link->phy_params.v_level, msm_dp_link->phy_params.p_level); return 0; } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link) { - dp_link->phy_params.v_level = 0; - dp_link->phy_params.p_level = 0; + msm_dp_link->phy_params.v_level = 0; + msm_dp_link->phy_params.p_level = 0; } -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp) { u32 tbd; - struct dp_link_private *link; + struct msm_dp_link_private *link; - link = container_of(dp_link, struct dp_link_private, dp_link); + link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link); /* * Few simplistic rules and assumptions made here: @@ -1209,10 
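
/*
 * Sketch: the clamping in adjust_levels() above enforces both the
 * per-parameter maximum and the combined swing + pre-emphasis budget.
 */
#define TRAIN_LEVEL_MAX 3               /* DP_TRAIN_LEVEL_MAX */

struct drive_levels { int v_level, p_level; };

static struct drive_levels clamp_levels(int v_max, int p_max)
{
    struct drive_levels l = { v_max, p_max };

    if (l.v_level > TRAIN_LEVEL_MAX)
        l.v_level = TRAIN_LEVEL_MAX;
    if (l.p_level > TRAIN_LEVEL_MAX)
        l.p_level = TRAIN_LEVEL_MAX;
    /* pre-emphasis is capped by what the swing level leaves over */
    if (l.p_level > TRAIN_LEVEL_MAX - l.v_level)
        l.p_level = TRAIN_LEVEL_MAX - l.v_level;
    return l;
}
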
+1209,10 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) return tbd; } -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux) { - struct dp_link_private *link; - struct dp_link *dp_link; + struct msm_dp_link_private *link; + struct msm_dp_link *msm_dp_link; if (!dev || !aux) { DRM_ERROR("invalid input\n"); @@ -1226,7 +1226,7 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) link->aux = aux; mutex_init(&link->psm_mutex); - dp_link = &link->dp_link; + msm_dp_link = &link->msm_dp_link; - return dp_link; + return msm_dp_link; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 5846337bb56f..8db5d5698a97 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -12,7 +12,7 @@ #define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) -struct dp_link_info { +struct msm_dp_link_info { unsigned char revision; unsigned int rate; unsigned int num_lanes; @@ -21,7 +21,7 @@ struct dp_link_info { #define DP_TRAIN_LEVEL_MAX 3 -struct dp_link_test_video { +struct msm_dp_link_test_video { u32 test_video_pattern; u32 test_bit_depth; u32 test_dyn_range; @@ -39,7 +39,7 @@ struct dp_link_test_video { u32 test_rr_n; }; -struct dp_link_test_audio { +struct msm_dp_link_test_audio { u32 test_audio_sampling_rate; u32 test_audio_channel_count; u32 test_audio_pattern_type; @@ -53,21 +53,21 @@ struct dp_link_test_audio { u32 test_audio_period_ch_8; }; -struct dp_link_phy_params { +struct msm_dp_link_phy_params { u32 phy_test_pattern_sel; u8 v_level; u8 p_level; }; -struct dp_link { +struct msm_dp_link { u32 sink_request; u32 test_response; u8 sink_count; - struct dp_link_test_video test_video; - struct dp_link_test_audio test_audio; - struct dp_link_phy_params phy_params; - struct dp_link_info link_params; + struct msm_dp_link_test_video test_video; + struct msm_dp_link_test_audio test_audio; + struct msm_dp_link_phy_params phy_params; + struct msm_dp_link_info link_params; }; /** @@ -78,7 +78,7 @@ struct dp_link { * given bit depth value. This function assumes that bit depth has * already been validated. 
*/ -static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) +static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd) { /* * Few simplistic rules and assumptions made here: @@ -99,22 +99,22 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) } } -void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link); -u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); -int dp_link_process_request(struct dp_link *dp_link); -int dp_link_get_colorimetry_config(struct dp_link *dp_link); -int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status); -bool dp_link_send_test_response(struct dp_link *dp_link); -int dp_link_psm_config(struct dp_link *dp_link, - struct dp_link_info *link_info, bool enable); -bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum); +void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link); +u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp); +int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link); +int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link); +int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status); +bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link); +int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link, + struct msm_dp_link_info *link_info, bool enable); +bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum); /** - * dp_link_get() - get the functionalities of dp test module + * msm_dp_link_get() - get the functionalities of dp test module * * - * return: a pointer to dp_link struct + * return: a pointer to msm_dp_link struct */ -struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux); +struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux); #endif /* _DP_LINK_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 6ff6c9ef351f..5d7eaa31bf31 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -14,52 +14,52 @@ #define DP_MAX_NUM_DP_LANES 4 #define DP_LINK_RATE_HBR2 540000 /* kHz */ -struct dp_panel_private { +struct msm_dp_panel_private { struct device *dev; struct drm_device *drm_dev; - struct dp_panel dp_panel; + struct msm_dp_panel msm_dp_panel; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; bool panel_on; }; -static void dp_panel_read_psr_cap(struct dp_panel_private *panel) +static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel) { ssize_t rlen; - struct dp_panel *dp_panel; + struct msm_dp_panel *msm_dp_panel; - dp_panel = &panel->dp_panel; + msm_dp_panel = &panel->msm_dp_panel; /* edp sink */ - if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { + if (msm_dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT, - &dp_panel->psr_cap, sizeof(dp_panel->psr_cap)); - if (rlen == sizeof(dp_panel->psr_cap)) { + &msm_dp_panel->psr_cap, sizeof(msm_dp_panel->psr_cap)); + if (rlen == sizeof(msm_dp_panel->psr_cap)) { drm_dbg_dp(panel->drm_dev, "psr version: 0x%x, psr_cap: 0x%x\n", - dp_panel->psr_cap.version, - dp_panel->psr_cap.capabilities); + msm_dp_panel->psr_cap.version, + msm_dp_panel->psr_cap.capabilities); } else DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen); } } -static int dp_panel_read_dpcd(struct dp_panel *dp_panel) +static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel) { int rc; - struct 
dp_panel_private *panel; - struct dp_link_info *link_info; + struct msm_dp_panel_private *panel; + struct msm_dp_link_info *link_info; u8 *dpcd, major, minor; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); - dpcd = dp_panel->dpcd; + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); + dpcd = msm_dp_panel->dpcd; rc = drm_dp_read_dpcd_caps(panel->aux, dpcd); if (rc) return rc; - dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); - link_info = &dp_panel->link_info; + msm_dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd); + link_info = &msm_dp_panel->link_info; link_info->revision = dpcd[DP_DPCD_REV]; major = (link_info->revision >> 4) & 0x0f; minor = link_info->revision & 0x0f; @@ -68,12 +68,12 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) link_info->num_lanes = drm_dp_max_lane_count(dpcd); /* Limit data lanes from data-lanes of endpoint property of dtsi */ - if (link_info->num_lanes > dp_panel->max_dp_lanes) - link_info->num_lanes = dp_panel->max_dp_lanes; + if (link_info->num_lanes > msm_dp_panel->max_dp_lanes) + link_info->num_lanes = msm_dp_panel->max_dp_lanes; /* Limit link rate from link-frequencies of endpoint property of dtsi */ - if (link_info->rate > dp_panel->max_dp_link_rate) - link_info->rate = dp_panel->max_dp_link_rate; + if (link_info->rate > msm_dp_panel->max_dp_link_rate) + link_info->rate = msm_dp_panel->max_dp_link_rate; drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor); drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate); @@ -82,21 +82,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) if (drm_dp_enhanced_frame_cap(dpcd)) link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; - dp_panel_read_psr_cap(panel); + msm_dp_panel_read_psr_cap(panel); return rc; } -static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, +static u32 msm_dp_panel_get_supported_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - const struct dp_link_info *link_info; + const struct msm_dp_link_info *link_info; const u32 max_supported_bpp = 30, min_supported_bpp = 18; u32 bpp, data_rate_khz; bpp = min(mode_edid_bpp, max_supported_bpp); - link_info = &dp_panel->link_info; + link_info = &msm_dp_panel->link_info; data_rate_khz = link_info->num_lanes * link_info->rate * 8; do { @@ -108,39 +108,39 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, return min_supported_bpp; } -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { int rc, bw_code; int count; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel || !connector) { + if (!msm_dp_panel || !connector) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n", - dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate); + msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate); - rc = dp_panel_read_dpcd(dp_panel); + rc = msm_dp_panel_read_dpcd(msm_dp_panel); if (rc) { DRM_ERROR("read dpcd failed %d\n", rc); return rc; } - bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + bw_code = drm_dp_link_rate_to_bw_code(msm_dp_panel->link_info.rate); if (!is_link_rate_valid(bw_code) || - 
!is_lane_count_valid(dp_panel->link_info.num_lanes) || - (bw_code > dp_panel->max_bw_code)) { - DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate, - dp_panel->link_info.num_lanes); + !is_lane_count_valid(msm_dp_panel->link_info.num_lanes) || + (bw_code > msm_dp_panel->max_bw_code)) { + DRM_ERROR("Illegal link rate=%d lane=%d\n", msm_dp_panel->link_info.rate, + msm_dp_panel->link_info.num_lanes); return -EINVAL; } - if (drm_dp_is_branch(dp_panel->dpcd)) { + if (drm_dp_is_branch(msm_dp_panel->dpcd)) { count = drm_dp_read_sink_count(panel->aux); if (!count) { panel->link->sink_count = 0; @@ -148,21 +148,21 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, } } - rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd, - dp_panel->downstream_ports); + rc = drm_dp_read_downstream_info(panel->aux, msm_dp_panel->dpcd, + msm_dp_panel->downstream_ports); if (rc) return rc; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); - dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); + msm_dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc); - drm_edid_connector_update(connector, dp_panel->drm_edid); + drm_edid_connector_update(connector, msm_dp_panel->drm_edid); - if (!dp_panel->drm_edid) { + if (!msm_dp_panel->drm_edid) { DRM_ERROR("panel edid read failed\n"); /* check edid read fail is due to unplug */ - if (!dp_catalog_link_is_connected(panel->catalog)) { + if (!msm_dp_catalog_link_is_connected(panel->catalog)) { rc = -ETIMEDOUT; goto end; } @@ -172,87 +172,87 @@ end: return rc; } -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; u32 bpp; - if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + if (!msm_dp_panel || !mode_edid_bpp || !mode_pclk_khz) { DRM_ERROR("invalid input\n"); return 0; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); - if (dp_panel->video_test) - bpp = dp_link_bit_depth_to_bpp( + if (msm_dp_panel->video_test) + bpp = msm_dp_link_bit_depth_to_bpp( panel->link->test_video.test_bit_depth); else - bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + bpp = msm_dp_panel_get_supported_bpp(msm_dp_panel, mode_edid_bpp, mode_pclk_khz); return bpp; } -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector) { - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - if (dp_panel->drm_edid) + if (msm_dp_panel->drm_edid) return drm_edid_connector_add_modes(connector); return 0; } -static u8 dp_panel_get_edid_checksum(const struct edid *edid) +static u8 msm_dp_panel_get_edid_checksum(const struct edid *edid) { edid += edid->extensions; return edid->checksum; } -void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { /* FIXME: get rid of drm_edid_raw() */ - const 
struct edid *edid = drm_edid_raw(dp_panel->drm_edid); + const struct edid *edid = drm_edid_raw(msm_dp_panel->drm_edid); u8 checksum; if (edid) - checksum = dp_panel_get_edid_checksum(edid); + checksum = msm_dp_panel_get_edid_checksum(edid); else - checksum = dp_panel->connector->real_edid_checksum; + checksum = msm_dp_panel->connector->real_edid_checksum; - dp_link_send_edid_checksum(panel->link, checksum); - dp_link_send_test_response(panel->link); + msm_dp_link_send_edid_checksum(panel->link, checksum); + msm_dp_link_send_test_response(panel->link); } } -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; if (!panel->panel_on) { @@ -262,31 +262,31 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) } if (!enable) { - dp_catalog_panel_tpg_disable(catalog); + msm_dp_catalog_panel_tpg_disable(catalog); return; } drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n"); - dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode); + msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode); } -static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) +static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; - struct dp_display_mode *dp_mode; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; + struct msm_dp_display_mode *msm_dp_mode; struct drm_dp_vsc_sdp vsc_sdp_data; struct dp_sdp vsc_sdp; ssize_t len; - if (!dp_panel) { + if (!msm_dp_panel) { DRM_ERROR("invalid input\n"); return -EINVAL; } - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_mode = &dp_panel->dp_mode; + msm_dp_mode = &msm_dp_panel->msm_dp_mode; memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data)); @@ -300,7 +300,7 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT; /* VSC SDP Payload for DB17 */ - vsc_sdp_data.bpc = dp_mode->bpp / 3; + vsc_sdp_data.bpc = msm_dp_mode->bpp / 3; vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA; /* VSC SDP Payload for DB18 */ @@ -312,36 +312,36 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel) return len; } - dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); + msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp); return 0; } -void dp_panel_dump_regs(struct dp_panel *dp_panel) +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel) { - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - dp_catalog_dump_regs(catalog); + msm_dp_catalog_dump_regs(catalog); } -int dp_panel_timing_cfg(struct dp_panel *dp_panel) +int msm_dp_panel_timing_cfg(struct msm_dp_panel 
*msm_dp_panel) { u32 data, total_ver, total_hor; - struct dp_catalog *catalog; - struct dp_panel_private *panel; + struct msm_dp_catalog *catalog; + struct msm_dp_panel_private *panel; struct drm_display_mode *drm_mode; u32 width_blanking; u32 sync_start; - u32 dp_active; + u32 msm_dp_active; u32 total; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); catalog = panel->catalog; - drm_mode = &panel->dp_panel.dp_mode.drm_mode; + drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode; drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n", drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end, @@ -371,9 +371,9 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data = drm_mode->vsync_end - drm_mode->vsync_start; data <<= 16; - data |= (panel->dp_panel.dp_mode.v_active_low << 31); + data |= (panel->msm_dp_panel.msm_dp_mode.v_active_low << 31); data |= drm_mode->hsync_end - drm_mode->hsync_start; - data |= (panel->dp_panel.dp_mode.h_active_low << 15); + data |= (panel->msm_dp_panel.msm_dp_mode.h_active_low << 15); width_blanking = data; @@ -381,26 +381,26 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data <<= 16; data |= drm_mode->hdisplay; - dp_active = data; + msm_dp_active = data; - dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active); + msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active); - if (dp_panel->dp_mode.out_fmt_is_yuv_420) - dp_panel_setup_vsc_sdp_yuv_420(dp_panel); + if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420) + msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel); panel->panel_on = true; return 0; } -int dp_panel_init_panel_info(struct dp_panel *dp_panel) +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel) { struct drm_display_mode *drm_mode; - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; - drm_mode = &dp_panel->dp_mode.drm_mode; + drm_mode = &msm_dp_panel->msm_dp_mode.drm_mode; - panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); /* * print resolution info as this is a result @@ -421,18 +421,18 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) drm_mode->vsync_end - drm_mode->vsync_start); drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n", drm_mode->clock); - drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp); + drm_dbg_dp(panel->drm_dev, "bpp = %d\n", msm_dp_panel->msm_dp_mode.bpp); - dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp, - dp_panel->dp_mode.drm_mode.clock); + msm_dp_panel->msm_dp_mode.bpp = msm_dp_panel_get_mode_bpp(msm_dp_panel, msm_dp_panel->msm_dp_mode.bpp, + msm_dp_panel->msm_dp_mode.drm_mode.clock); drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n", - dp_panel->dp_mode.bpp); + msm_dp_panel->msm_dp_mode.bpp); return 0; } -static u32 dp_panel_link_frequencies(struct device_node *of_node) +static u32 msm_dp_panel_link_frequencies(struct device_node *of_node) { struct device_node *endpoint; u64 frequency = 0; @@ -456,17 +456,17 @@ static u32 dp_panel_link_frequencies(struct device_node *of_node) return frequency; } -static int dp_panel_parse_dt(struct dp_panel *dp_panel) +static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel) { - struct dp_panel_private *panel; + struct msm_dp_panel_private *panel; struct device_node *of_node; int cnt; - panel = container_of(dp_panel, struct 
dp_panel_private, dp_panel); + panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel); of_node = panel->dev->of_node; /* - * data-lanes is the property of dp_out endpoint + * data-lanes is the property of msm_dp_out endpoint */ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES); if (cnt < 0) { @@ -475,21 +475,21 @@ static int dp_panel_parse_dt(struct dp_panel *dp_panel) } if (cnt > 0) - dp_panel->max_dp_lanes = cnt; + msm_dp_panel->max_dp_lanes = cnt; else - dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ + msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */ - dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node); - if (!dp_panel->max_dp_link_rate) - dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; + msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node); + if (!msm_dp_panel->max_dp_link_rate) + msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2; return 0; } -struct dp_panel *dp_panel_get(struct dp_panel_in *in) +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in) { - struct dp_panel_private *panel; - struct dp_panel *dp_panel; + struct msm_dp_panel_private *panel; + struct msm_dp_panel *msm_dp_panel; int ret; if (!in->dev || !in->catalog || !in->aux || !in->link) { @@ -506,20 +506,20 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in) panel->catalog = in->catalog; panel->link = in->link; - dp_panel = &panel->dp_panel; - dp_panel->max_bw_code = DP_LINK_BW_8_1; + msm_dp_panel = &panel->msm_dp_panel; + msm_dp_panel->max_bw_code = DP_LINK_BW_8_1; - ret = dp_panel_parse_dt(dp_panel); + ret = msm_dp_panel_parse_dt(msm_dp_panel); if (ret) return ERR_PTR(ret); - return dp_panel; + return msm_dp_panel; } -void dp_panel_put(struct dp_panel *dp_panel) +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel) { - if (!dp_panel) + if (!msm_dp_panel) return; - drm_edid_free(dp_panel->drm_edid); + drm_edid_free(msm_dp_panel->drm_edid); } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 6722e3923fa5..0e944db3adf2 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -13,7 +13,7 @@ struct edid; -struct dp_display_mode { +struct msm_dp_display_mode { struct drm_display_mode drm_mode; u32 bpp; u32 h_active_low; @@ -21,28 +21,28 @@ struct dp_display_mode { bool out_fmt_is_yuv_420; }; -struct dp_panel_in { +struct msm_dp_panel_in { struct device *dev; struct drm_dp_aux *aux; - struct dp_link *link; - struct dp_catalog *catalog; + struct msm_dp_link *link; + struct msm_dp_catalog *catalog; }; -struct dp_panel_psr { +struct msm_dp_panel_psr { u8 version; u8 capabilities; }; -struct dp_panel { +struct msm_dp_panel { /* dpcd raw data */ u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - struct dp_link_info link_info; + struct msm_dp_link_info link_info; const struct drm_edid *drm_edid; struct drm_connector *connector; - struct dp_display_mode dp_mode; - struct dp_panel_psr psr_cap; + struct msm_dp_display_mode msm_dp_mode; + struct msm_dp_panel_psr psr_cap; bool video_test; bool vsc_sdp_supported; @@ -52,18 +52,18 @@ struct dp_panel { u32 max_bw_code; }; -int dp_panel_init_panel_info(struct dp_panel *dp_panel); -int dp_panel_deinit(struct dp_panel *dp_panel); -int dp_panel_timing_cfg(struct dp_panel *dp_panel); -void dp_panel_dump_regs(struct dp_panel *dp_panel); -int dp_panel_read_sink_caps(struct dp_panel *dp_panel, +int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel); +int 
msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel); +int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, +u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp, u32 mode_pclk_khz); -int dp_panel_get_modes(struct dp_panel *dp_panel, +int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel, struct drm_connector *connector); -void dp_panel_handle_sink_request(struct dp_panel *dp_panel); -void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable); +void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel); +void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable); /** * is_link_rate_valid() - validates the link rate @@ -80,7 +80,7 @@ static inline bool is_link_rate_valid(u32 bw_code) } /** - * dp_link_is_lane_count_valid() - validates the lane count + * msm_dp_link_is_lane_count_valid() - validates the lane count * @lane_count: lane count requested by the sink * * Returns true if the requested lane count is supported. @@ -92,6 +92,6 @@ static inline bool is_lane_count_valid(u32 lane_count) lane_count == 4); } -struct dp_panel *dp_panel_get(struct dp_panel_in *in); -void dp_panel_put(struct dp_panel *dp_panel); +struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in); +void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel); #endif /* _DP_PANEL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c index da9207caf72d..2a40f07fe2d5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.c +++ b/drivers/gpu/drm/msm/dp/dp_utils.c @@ -9,7 +9,7 @@ #define DP_SDP_HEADER_SIZE 8 -u8 dp_utils_get_g0_value(u8 data) +u8 msm_dp_utils_get_g0_value(u8 data) { u8 c[4]; u8 g[4]; @@ -30,7 +30,7 @@ u8 dp_utils_get_g0_value(u8 data) return ret_data; } -u8 dp_utils_get_g1_value(u8 data) +u8 msm_dp_utils_get_g1_value(u8 data) { u8 c[4]; u8 g[4]; @@ -51,7 +51,7 @@ u8 dp_utils_get_g1_value(u8 data) return ret_data; } -u8 dp_utils_calculate_parity(u32 data) +u8 msm_dp_utils_calculate_parity(u32 data) { u8 x0 = 0; u8 x1 = 0; @@ -65,8 +65,8 @@ u8 dp_utils_calculate_parity(u32 data) iData = (data >> i * 4) & 0xF; ci = iData ^ x1; - x1 = x0 ^ dp_utils_get_g1_value(ci); - x0 = dp_utils_get_g0_value(ci); + x1 = x0 ^ msm_dp_utils_get_g1_value(ci); + x0 = msm_dp_utils_get_g0_value(ci); } parity_byte = x1 | (x0 << 4); @@ -74,7 +74,7 @@ u8 dp_utils_calculate_parity(u32 data) return parity_byte; } -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff) { size_t length; @@ -83,14 +83,14 @@ ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_b return -ENOSPC; header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) | - FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) | + FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) | FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) | - FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1)); + FIELD_PREP(PARITY_1_MASK, msm_dp_utils_calculate_parity(sdp_header->HB1)); header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) | - FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) | + FIELD_PREP(PARITY_2_MASK, 
msm_dp_utils_calculate_parity(sdp_header->HB2)) | FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) | - FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3)); + FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3)); return length; } diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h index 7c056d9798dc..88d53157f5b5 100644 --- a/drivers/gpu/drm/msm/dp/dp_utils.h +++ b/drivers/gpu/drm/msm/dp/dp_utils.h @@ -28,9 +28,9 @@ #define HEADER_3_MASK GENMASK(23, 16) #define PARITY_3_MASK GENMASK(31, 24) -u8 dp_utils_get_g0_value(u8 data); -u8 dp_utils_get_g1_value(u8 data); -u8 dp_utils_calculate_parity(u32 data); -ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); +u8 msm_dp_utils_get_g0_value(u8 data); +u8 msm_dp_utils_get_g1_value(u8 data); +u8 msm_dp_utils_calculate_parity(u32 data); +ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff); #endif /* _DP_UTILS_H_ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c index e6ffaf92d26d..a719fd33d9d8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -157,9 +157,8 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) #define HDMI_MHZ_TO_HZ ((u64)1000000) static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) { - u32 const ratio_list[] = {1, 2, 3, 4, 5, 6, - 9, 10, 12, 15, 25}; - u32 const band_list[] = {0, 1, 2, 3}; + static const u32 ratio_list[] = {1, 2, 3, 4, 5, 6, 9, 10, 12, 15, 25}; + static const u32 band_list[] = {0, 1, 2, 3}; u32 const sz_ratio = ARRAY_SIZE(ratio_list); u32 const sz_band = ARRAY_SIZE(band_list); u32 const cmp_cnt = 1024; @@ -270,7 +269,7 @@ find_optimal_index: case 25: found_hsclk_divsel = 14; break; - }; + } pd->vco_freq = found_vco_freq; pd->tx_band_sel = found_tx_band_sel; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 8c13b08708d2..c2dd8ef6d6dc 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -11,6 +11,7 @@ #include <linux/of_address.h> #include <linux/uaccess.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -291,7 +292,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (priv->kms_init) { drm_kms_helper_poll_init(ddev); - msm_fbdev_setup(ddev); + drm_client_setup(ddev, NULL); } return 0; @@ -902,6 +903,7 @@ static const struct drm_driver msm_driver = { #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, #endif + MSM_FBDEV_DRIVER_OPS, .show_fdinfo = msm_show_fdinfo, .ioctls = msm_ioctls, .num_ioctls = ARRAY_SIZE(msm_ioctls), @@ -983,6 +985,10 @@ module_param(prefer_mdp5, bool, 0444); /* list all platforms supported by both mdp5 and dpu drivers */ static const char *const msm_mdp5_dpu_migration[] = { + "qcom,msm8917-mdp5", + "qcom,msm8937-mdp5", + "qcom,msm8953-mdp5", + "qcom,msm8996-mdp5", "qcom,sdm630-mdp5", "qcom,sdm660-mdp5", NULL, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 2e28a1344636..d8c9a1b19263 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -36,6 +36,9 @@ extern struct fault_attr fail_gem_alloc; extern struct fault_attr fail_gem_iova; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct msm_kms; struct msm_gpu; struct msm_mmu; @@ -49,7 +52,6 @@ struct msm_gem_vma; struct msm_disp_state; #define 
MAX_CRTCS 8 -#define MAX_BRIDGES 8 #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) @@ -68,23 +70,6 @@ enum msm_dsi_controller { }; #define MSM_GPU_MAX_RINGS 4 -#define MAX_H_TILES_PER_DISPLAY 2 - -/** - * struct msm_display_topology - defines a display topology pipeline - * @num_lm: number of layer mixers used - * @num_intf: number of interfaces the panel is mounted on - * @num_dspp: number of dspp blocks used - * @num_dsc: number of Display Stream Compression (DSC) blocks used - * @needs_cdm: indicates whether cdm block is needed for this display topology - */ -struct msm_display_topology { - u32 num_lm; - u32 num_intf; - u32 num_dspp; - u32 num_dsc; - bool needs_cdm; -}; /* Commit/Event thread specific structure */ struct msm_drm_thread { @@ -290,11 +275,13 @@ struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format); #ifdef CONFIG_DRM_FBDEV_EMULATION -void msm_fbdev_setup(struct drm_device *dev); +int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define MSM_FBDEV_DRIVER_OPS \ + .fbdev_probe = msm_fbdev_driver_fbdev_probe #else -static inline void msm_fbdev_setup(struct drm_device *dev) -{ -} +#define MSM_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif struct hdmi; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 030bedac632d..c62249b1ab3d 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -65,8 +65,31 @@ static const struct fb_ops msm_fb_ops = { .fb_destroy = msm_fbdev_fb_destroy, }; -static int msm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = { + .fb_dirty = msm_fbdev_fb_dirty, +}; + +int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = helper->dev; struct msm_drm_private *priv = dev->dev_private; @@ -114,6 +137,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, DBG("fbi=%p, dev=%p", fbi, dev); + helper->funcs = &msm_fbdev_helper_funcs; helper->fb = fb; fbi->fbops = &msm_fb_ops; @@ -138,119 +162,3 @@ fail: drm_framebuffer_remove(fb); return ret; } - -static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper, - struct drm_clip_rect *clip) -{ - struct drm_device *dev = helper->dev; - int ret; - - /* Call damage handlers only if necessary */ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - return ret; - } - - return 0; -} - -static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { - .fb_probe = msm_fbdev_create, - .fb_dirty = msm_fbdev_fb_dirty, -}; - -/* - * struct drm_client - */ - -static void msm_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - 
drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int msm_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int msm_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs msm_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = msm_fbdev_client_unregister, - .restore = msm_fbdev_client_restore, - .hotplug = msm_fbdev_client_hotplug, -}; - -/* initialize fbdev helper */ -void msm_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *helper; - int ret; - - if (!fbdev) - return; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - helper = kzalloc(sizeof(*helper), GFP_KERNEL); - if (!helper) - return; - drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &msm_fbdev_client_funcs); - if (ret) { - drm_err(dev, "Failed to register client: %d\n", ret); - goto err_drm_fb_helper_unprepare; - } - - drm_client_register(&helper->client); - - return; - -err_drm_fb_helper_unprepare: - drm_fb_helper_unprepare(helper); - kfree(helper); -} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a274b8466423..0d4a3744cfcb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) mutex_unlock(&gpu->active_lock); gpu->funcs->submit(gpu, submit); - gpu->cur_ctx_seqno = submit->queue->ctx->seqno; + submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; pm_runtime_put(&gpu->pdev->dev); hangcheck_timer_reset(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 1f02bb9956be..7cabc8480d7c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -194,17 +194,6 @@ struct msm_gpu { refcount_t sysprof_active; /** - * cur_ctx_seqno: - * - * The ctx->seqno value of the last context to submit rendering, - * and the one with current pgtables installed (for generations - * that support per-context pgtables). Tracked by seqno rather - * than pointer value to avoid dangling pointers, and cases where - * a ctx can be freed and a new one created with the same address. - */ - int cur_ctx_seqno; - - /** * lock: * * General lock for serializing all the gpu things. 
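The msm_gpu.c and msm_gpu.h hunks above, together with the msm_ringbuffer.h hunk further down, move cur_ctx_seqno from struct msm_gpu into struct msm_ringbuffer: once preemption exists, each ring can have a different context's page tables installed, so "did the last submit on this ring come from the same context?" has to be answered per ring. A minimal sketch of the comparison this enables, assuming only the fields shown in these hunks (the helper name is hypothetical):

static bool msm_ring_ctx_changed(struct msm_ringbuffer *ring,
				 struct msm_gem_submit *submit)
{
	/* Compare seqnos rather than context pointers: a freed context's
	 * address can be reused by a new context, but a seqno cannot.
	 */
	return ring->cur_ctx_seqno != submit->queue->ctx->seqno;
}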
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index ea70c1c32d94..6970b0f7f457 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; struct msm_drm_private *priv = gpu->dev->dev_private; + int ret; /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) @@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu) mutex_init(&df->lock); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, - DEV_PM_QOS_MIN_FREQUENCY, 0); + ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (ret < 0) { + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n"); + return; + } msm_devfreq_profile.initial_freq = gpu->fast_rate; diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index ac40d857bc45..7f863282db0d 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume, TP_printk("%u", __entry->dummy) ); +TRACE_EVENT(msm_gpu_preemption_trigger, + TP_PROTO(int ring_id_from, int ring_id_to), + TP_ARGS(ring_id_from, ring_id_to), + TP_STRUCT__entry( + __field(int, ring_id_from) + __field(int, ring_id_to) + ), + TP_fast_assign( + __entry->ring_id_from = ring_id_from; + __entry->ring_id_to = ring_id_to; + ), + TP_printk("preempting %u -> %u", + __entry->ring_id_from, + __entry->ring_id_to) +); + +TRACE_EVENT(msm_gpu_preemption_irq, + TP_PROTO(u32 ring_id), + TP_ARGS(ring_id), + TP_STRUCT__entry( + __field(u32, ring_id) + ), + TP_fast_assign( + __entry->ring_id = ring_id; + ), + TP_printk("preempted to %u", __entry->ring_id) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index af6a6fcb1173..f3326d09bdbc 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -5,11 +5,11 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/aperture.h> #include <linux/kthread.h> #include <linux/sched/mm.h> #include <uapi/linux/sched/types.h> -#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_mode_config.h> #include <drm/drm_vblank.h> @@ -237,7 +237,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv) int ret; /* the fw fb could be anywhere in memory */ - ret = drm_aperture_remove_framebuffers(drv); + ret = aperture_remove_all_conflicting_devices(drv->name); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 1e0c54de3716..e60162744c66 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -92,12 +92,6 @@ struct msm_kms_funcs { * Format handling: */ - /* do format checking on format modified through fb_cmd2 modifiers */ - int (*check_modified_format)(const struct msm_kms *kms, - const struct msm_format *msm_fmt, - const struct drm_mode_fb_cmd2 *cmd, - struct drm_gem_object **bos); - /* misc: */ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index faa88fd6eb4d..b7bd899ead44 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -19,13 +19,7 @@ #include "msm_mdss.h" #include "msm_kms.h" -#define HW_REV 0x0 -#define HW_INTR_STATUS 0x0010 - -#define UBWC_DEC_HW_VERSION 0x58 -#define 
UBWC_STATIC 0x144 -#define UBWC_CTRL_2 0x150 -#define UBWC_PREDICTION_MODE 0x154 +#include <generated/mdss.xml.h> #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ @@ -83,7 +77,7 @@ static void msm_mdss_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); - interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS); + interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS); while (interrupts) { irq_hw_number_t hwirq = fls(interrupts) - 1; @@ -173,7 +167,7 @@ static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss) { const struct msm_mdss_data *data = msm_mdss->mdss_data; - writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) @@ -189,7 +183,7 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss) if (data->ubwc_enc_version == UBWC_1_0) value |= BIT(8); - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); } static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) @@ -200,21 +194,22 @@ static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss) (data->highest_bank_bit & 0x7) << 4 | (data->macrotile_mode & 0x1) << 12; - writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC); + writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC); if (data->ubwc_enc_version == UBWC_3_0) { - writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } else { if (data->ubwc_dec_version == UBWC_4_3) - writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2); + writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); else - writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); - writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); + writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2); + writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE); } } -#define MDSS_HW_MAJ_MIN GENMASK(31, 16) +#define MDSS_HW_MAJ_MIN \ + (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK) #define MDSS_HW_MSM8996 0x1007 #define MDSS_HW_MSM8937 0x100e @@ -235,7 +230,7 @@ static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_m if (!data) return NULL; - hw_rev = readl_relaxed(mdss->mmio + HW_REV); + hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION); hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev); if (hw_rev == MDSS_HW_MSM8996 || @@ -334,9 +329,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n", msm_mdss->mdss_data->ubwc_dec_version); dev_err(msm_mdss->dev, "HW_REV: 0x%x\n", - readl_relaxed(msm_mdss->mmio + HW_REV)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION)); dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n", - readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION)); + readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION)); break; } @@ -573,6 +568,16 @@ static const struct msm_mdss_data qcm2290_data = { .reg_bus_bw = 76800, }; +static const struct msm_mdss_data sa8775p_data = { + .ubwc_enc_version = UBWC_4_0, + .ubwc_dec_version = UBWC_4_0, + .ubwc_swizzle = 4, + .ubwc_static = 1, + .highest_bank_bit = 0, + .macrotile_mode = 1, + .reg_bus_bw = 74000, +}; + static const struct msm_mdss_data sc7180_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version 
= UBWC_2_0, @@ -710,6 +715,7 @@ static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss" }, { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, + { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data }, { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 9d6655f96f0c..c803556a8f64 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -64,7 +64,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, char name[32]; int ret; - /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ + /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ)); ring = kzalloc(sizeof(*ring), GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 0d6beb8cd39a..d1e49f701c81 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -31,10 +31,12 @@ struct msm_rbmemptrs { volatile uint32_t rptr; volatile uint32_t fence; /* Introduced on A7xx */ + volatile uint32_t bv_rptr; volatile uint32_t bv_fence; volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT]; volatile u64 ttbr0; + volatile u32 context_idr; }; struct msm_cp_state { @@ -99,6 +101,22 @@ struct msm_ringbuffer { * preemption. Can be aquired from irq context. */ spinlock_t preempt_lock; + + /* + * Whether we skipped writing wptr and it needs to be updated in the + * future when the ring becomes current. + */ + bool restore_wptr; + + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the last context to submit to this ring + * Tracked by seqno rather than pointer value to avoid dangling + * pointers, and cases where a ctx can be freed and a new one created + * with the same address. + */ + int cur_ctx_seqno; }; struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 0e803125a325..2fc3eaf81f44 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -161,6 +161,8 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; enum drm_sched_priority sched_prio; + extern int enable_preemption; + bool preemption_supported; unsigned ring_nr; int ret; @@ -170,6 +172,11 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, if (!priv->gpu) return -ENODEV; + preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0; + + if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) + return -EINVAL; + ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml index 97608603ea62..2db425abf0f3 100644 --- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml @@ -2358,7 +2358,12 @@ to upconvert to 32b float internally? 
<reg32 offset="0x0" name="REG" type="a6x_cp_protect"/> </array> - <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/> + <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"> + <bitfield name="STOP" pos="0" type="boolean"/> + <bitfield name="LEVEL" low="6" high="7"/> + <bitfield name="USES_GMEM" pos="8" type="boolean"/> + <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/> + </reg32> <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/> <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/> <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/> diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index cab01af55d22..55a35182858c 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -581,8 +581,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> and forcibly switch to the indicated context. </doc> <value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/> - <!-- Note, kgsl calls this CP_SET_AMBLE: --> - <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/> + <value name="CP_SET_AMBLE" value="0x55" variants="A6XX-"/> <!-- Seems to always have the payload: @@ -2013,42 +2012,38 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> </domain> -<domain name="CP_SET_CTXSWITCH_IB" width="32"> +<domain name="CP_SET_AMBLE" width="32"> <doc> - Used by the userspace driver to set various IB's which are - executed during context save/restore for handling - state that isn't restored by the - context switch routine itself. - </doc> - <enum name="ctxswitch_ib"> - <value name="RESTORE_IB" value="0"> + Used by the userspace and kernel drivers to set various IB's + which are executed during context save/restore for handling + state that isn't restored by the context switch routine itself. + </doc> + <enum name="amble_type"> + <value name="PREAMBLE_AMBLE_TYPE" value="0"> <doc>Executed unconditionally when switching back to the context.</doc> </value> - <value name="YIELD_RESTORE_IB" value="1"> + <value name="BIN_PREAMBLE_AMBLE_TYPE" value="1"> <doc> Executed when switching back after switching away during execution of - a CP_SET_MARKER packet with RM6_YIELD as the - payload *and* the normal save routine was - bypassed for a shorter one. I think this is - connected to the "skipsaverestore" bit set by - the kernel when preempting. + a CP_SET_MARKER packet with RM6_BIN_RENDER_END as the + payload *and* skipsaverestore is set. This is + expected to restore static register values not + saved when skipsaverestore is set. </doc> </value> - <value name="SAVE_IB" value="2"> + <value name="POSTAMBLE_AMBLE_TYPE" value="2"> <doc> Executed when switching away from the context, except for context switches initiated via CP_YIELD. </doc> </value> - <value name="RB_SAVE_IB" value="3"> + <value name="KMD_AMBLE_TYPE" value="3"> <doc> This can only be set by the RB (i.e. the kernel) and executes with protected mode off, but - is otherwise similar to SAVE_IB. - - Note, kgsl calls this CP_KMD_AMBLE_TYPE + is otherwise similar to POSTAMBLE_AMBLE_TYPE. 
</doc> </value> </enum> @@ -2060,7 +2055,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) </reg32> <reg32 offset="2" name="2"> <bitfield name="DWORDS" low="0" high="19" type="uint"/> - <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/> + <bitfield name="TYPE" low="20" high="21" type="amble_type"/> </reg32> </domain> diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml index 92f3263af170..8c9c4af350aa 100644 --- a/drivers/gpu/drm/msm/registers/display/mdp5.xml +++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml @@ -9,22 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> <domain name="VBIF" width="32"> </domain> -<domain name="MDSS" width="32"> - <reg32 offset="0x00000" name="HW_VERSION"> - <bitfield name="STEP" low="0" high="15" type="uint"/> - <bitfield name="MINOR" low="16" high="27" type="uint"/> - <bitfield name="MAJOR" low="28" high="31" type="uint"/> - </reg32> - - <reg32 offset="0x00010" name="HW_INTR_STATUS"> - <bitfield name="INTR_MDP" pos="0" type="boolean"/> - <bitfield name="INTR_DSI0" pos="4" type="boolean"/> - <bitfield name="INTR_DSI1" pos="5" type="boolean"/> - <bitfield name="INTR_HDMI" pos="8" type="boolean"/> - <bitfield name="INTR_EDP" pos="12" type="boolean"/> - </reg32> -</domain> - <domain name="MDP5" width="32"> <enum name="mdp5_intf_type"> diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml new file mode 100644 index 000000000000..ac85caf1575c --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/mdss.xml @@ -0,0 +1,29 @@ +<?xml version="1.0" encoding="UTF-8"?> +<database xmlns="http://nouveau.freedesktop.org/" +xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> +<import file="freedreno_copyright.xml"/> + +<domain name="MDSS" width="32"> + <reg32 offset="0x00000" name="HW_VERSION"> + <bitfield name="STEP" low="0" high="15" type="uint"/> + <bitfield name="MINOR" low="16" high="27" type="uint"/> + <bitfield name="MAJOR" low="28" high="31" type="uint"/> + </reg32> + + <reg32 offset="0x00010" name="HW_INTR_STATUS"> + <bitfield name="INTR_MDP" pos="0" type="boolean"/> + <bitfield name="INTR_DSI0" pos="4" type="boolean"/> + <bitfield name="INTR_DSI1" pos="5" type="boolean"/> + <bitfield name="INTR_HDMI" pos="8" type="boolean"/> + <bitfield name="INTR_EDP" pos="12" type="boolean"/> + </reg32> + + <reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/> + + <reg32 offset="0x00144" name="UBWC_STATIC"/> + <reg32 offset="0x00150" name="UBWC_CTRL_2"/> + <reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/> +</domain> + +</database> diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig index 518b53345354..264e74f45554 100644 --- a/drivers/gpu/drm/mxsfb/Kconfig +++ b/drivers/gpu/drm/mxsfb/Kconfig @@ -9,6 +9,7 @@ config DRM_MXSFB depends on DRM && OF depends on COMMON_CLK depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_MXS select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER @@ -26,6 +27,7 @@ config DRM_IMX_LCDIF depends on DRM && OF depends on COMMON_CLK depends on ARCH_MXC || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_MXS select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 0f895b8a99d6..58ccad9c425d 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -16,6 
+16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_fbdev_dma.h> @@ -243,6 +244,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver lcdif_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "imx-lcdif", .desc = "i.MX LCDIF Controller DRM", @@ -275,7 +277,7 @@ static int lcdif_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index cb5ce4e81fc7..34a98717b72c 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -19,6 +19,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -331,6 +332,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver mxsfb_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &fops, .name = "mxsfb-drm", .desc = "MXSFB Controller DRM", @@ -364,7 +366,7 @@ static int mxsfb_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ceef470c9fbf..ce840300578d 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -4,6 +4,7 @@ config DRM_NOUVEAU depends on DRM && PCI && MMU select IOMMU_API select FW_LOADER + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/nouveau/dispnv50/tile.h b/drivers/gpu/drm/nouveau/dispnv50/tile.h new file mode 100644 index 000000000000..e2be82830cf7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/tile.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NV50_TILE_H__ +#define __NV50_TILE_H__ + +#include <linux/types.h> +#include <linux/math.h> + +/* + * Tiling parameters for NV50+. + * GOB = Group of bytes, the main unit for tiling blocks. + * Tiling blocks are a power of 2 number of GOB. + * All GOBs and blocks have the same width: 64 bytes (so 16 pixels in 32bits). + * tile_mode is the log2 of the number of GOB per block. 
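+ *
+ * Worked example: on Fermi and later the log2 is taken from bits 7:4
+ * of tile_mode (see nouveau_get_gobs_in_block() below), so
+ * tile_mode = 0x40 describes a block of 1 << 4 = 16 GOBs: 64 bytes
+ * wide and 16 * 8 = 128 rows tall.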
+ */ + +#define NV_TILE_GOB_HEIGHT_TESLA 4 /* 4 x 64 bytes = 256 bytes for a GOB on Tesla */ +#define NV_TILE_GOB_HEIGHT 8 /* 8 x 64 bytes = 512 bytes for a GOB on Fermi and later */ +#define NV_TILE_GOB_WIDTH_BYTES 64 + +/* Number of blocks to cover the width of the framebuffer */ +static inline u32 nouveau_get_width_in_blocks(u32 stride) +{ + return DIV_ROUND_UP(stride, NV_TILE_GOB_WIDTH_BYTES); +} + +/* Return the height in pixels of one GOB */ +static inline u32 nouveau_get_gob_height(u16 family) +{ + if (family == NV_DEVICE_INFO_V0_TESLA) + return NV_TILE_GOB_HEIGHT_TESLA; + else + return NV_TILE_GOB_HEIGHT; +} + +/* Number of blocks to cover the height of the framebuffer */ +static inline u32 nouveau_get_height_in_blocks(u32 height, u32 gobs_in_block, u16 family) +{ + return DIV_ROUND_UP(height, nouveau_get_gob_height(family) * gobs_in_block); +} + +/* Return the GOB size in bytes */ +static inline u32 nouveau_get_gob_size(u16 family) +{ + return nouveau_get_gob_height(family) * NV_TILE_GOB_WIDTH_BYTES; +} + +/* Return the number of GOBs in a block */ +static inline int nouveau_get_gobs_in_block(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return 1 << (tile_mode >> 4); + return 1 << tile_mode; +} + +/* Return true if tile_mode is invalid */ +static inline bool nouveau_check_tile_mode(u32 tile_mode, u16 chipset) +{ + if (chipset >= 0xc0) + return (tile_mode & 0xfffff0f); + return (tile_mode & 0xfffffff0); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 7a2cceaee6e9..f6be426dd525 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -30,14 +30,20 @@ #include <nvhw/class/cl507e.h> #include <nvhw/class/clc37e.h> +#include <linux/iosys-map.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_panic.h> +#include <drm/ttm/ttm_bo.h> #include "nouveau_bo.h" #include "nouveau_gem.h" +#include "tile.h" static void nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma) @@ -577,6 +583,114 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return 0; } +/* Only used by drm_panic get_scanout_buffer() and set_pixel(), so it is + * protected by the drm panic spinlock + */ +static u32 nv50_panic_blk_h; + +/* Return the framebuffer offset of the start of the block where pixel(x,y) is */ +static u32 +nv50_get_block_off(unsigned int x, unsigned int y, unsigned int pitch) +{ + u32 blk_x, blk_y, blk_columns; + + blk_columns = nouveau_get_width_in_blocks(pitch); + blk_x = (x * 4) / NV_TILE_GOB_WIDTH_BYTES; + blk_y = y / nv50_panic_blk_h; + + return ((blk_y * blk_columns) + blk_x) * NV_TILE_GOB_WIDTH_BYTES * nv50_panic_blk_h; +} + +/* Turing and later have 2 levels of tiles inside the block */ +static void +nv50_set_pixel_swizzle(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + u32 blk_off, off, swizzle; + + blk_off = nv50_get_block_off(x, y, sb->pitch[0]); + + y = y % nv50_panic_blk_h; + + /* Inside the block, use the fast address swizzle to compute the offset. + * For nvidia blocklinear, bit order is yn..y3 x3 y2 x2 y1 y0 x1 x0 + */ + swizzle = (x & 3) | (y & 3) << 2 | (x & 4) << 2 | (y & 4) << 3; + swizzle |= (x & 8) << 3 | (y >> 3) << 7; + off = blk_off + swizzle * 4; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static
void +nv50_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, unsigned int y, + u32 color) +{ + u32 blk_off, off; + + blk_off = nv50_get_block_off(x, y, sb->width); + + x = x % (NV_TILE_GOB_WIDTH_BYTES / 4); + y = y % nv50_panic_blk_h; + off = blk_off + x * 4 + y * NV_TILE_GOB_WIDTH_BYTES; + + iosys_map_wr(&sb->map[0], off, u32, color); +} + +static int +nv50_wndw_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb) +{ + struct drm_framebuffer *fb; + struct nouveau_bo *nvbo; + struct nouveau_drm *drm = nouveau_drm(plane->dev); + u16 chipset = drm->client.device.info.chipset; + u8 family = drm->client.device.info.family; + u32 tile_mode; + u8 kind; + + if (!plane->state || !plane->state->fb) + return -EINVAL; + + fb = plane->state->fb; + nvbo = nouveau_gem_object(fb->obj[0]); + + /* Don't support compressed formats or multiplane yet. */ + if (nvbo->comp || fb->format->num_planes != 1) + return -EOPNOTSUPP; + + if (nouveau_bo_map(nvbo)) { + drm_warn(plane->dev, "nouveau bo map failed, panic won't be displayed\n"); + return -ENOMEM; + } + + if (nvbo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK) + iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)nvbo->kmap.virtual); + else + iosys_map_set_vaddr(&sb->map[0], nvbo->kmap.virtual); + + sb->height = fb->height; + sb->width = fb->width; + sb->pitch[0] = fb->pitches[0]; + sb->format = fb->format; + + nouveau_framebuffer_get_layout(fb, &tile_mode, &kind); + if (kind) { + /* If tiling is enabled, use set_pixel() to display correctly. + * Only handle 32-bit formats for now. + */ + if (fb->format->cpp[0] != 4) + return -EOPNOTSUPP; + nv50_panic_blk_h = nouveau_get_gob_height(family) * + nouveau_get_gobs_in_block(tile_mode, chipset); + + if (chipset >= 0x160) + sb->set_pixel = nv50_set_pixel_swizzle; + else + sb->set_pixel = nv50_set_pixel; + } + return 0; +} + static const struct drm_plane_helper_funcs nv50_wndw_helper = { .prepare_fb = nv50_wndw_prepare_fb, @@ -584,6 +698,14 @@ nv50_wndw_helper = { .atomic_check = nv50_wndw_atomic_check, }; +static const struct drm_plane_helper_funcs +nv50_wndw_primary_helper = { + .prepare_fb = nv50_wndw_prepare_fb, + .cleanup_fb = nv50_wndw_cleanup_fb, + .atomic_check = nv50_wndw_atomic_check, + .get_scanout_buffer = nv50_wndw_get_scanout_buffer, +}; + static void nv50_wndw_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) @@ -732,7 +854,10 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, return ret; } - drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&wndw->plane, &nv50_wndw_primary_helper); + else + drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper); if (wndw->func->ilut) { ret = nv50_lut_init(disp, mmu, &wndw->ilut); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b06aa473102b..8d5c9c74cbb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -477,14 +477,14 @@ nouveau_connector_of_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder; struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *cn, *dn = pci_device_to_OF_node(pdev); + struct device_node *dn = pci_device_to_OF_node(pdev); if (!dn || !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) || (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG)))) return NULL; -
- for_each_child_of_node(dn, cn) { + for_each_child_of_node_scoped(dn, cn) { const char *name = of_get_property(cn, "name", NULL); const void *edid = of_get_property(cn, "EDID", NULL); int idx = name ? name[strlen(name) - 1] - 'A' : 0; @@ -492,7 +492,6 @@ nouveau_connector_of_detect(struct drm_connector *connector) if (nv_encoder->dcb->i2c_index == idx && edid) { nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); - of_node_put(cn); return nv_encoder; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e2fd561cd23f..add006fc8d81 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -28,8 +28,8 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -44,6 +44,7 @@ #include <nvif/if0011.h> #include <nvif/if0013.h> #include <dispnv50/crc.h> +#include <dispnv50/tile.h> int nouveau_display_vblank_enable(struct drm_crtc *crtc) @@ -220,69 +221,29 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, return 0; } -static inline uint32_t -nouveau_get_width_in_blocks(uint32_t stride) -{ - /* GOBs per block in the x direction is always one, and GOBs are - * 64 bytes wide - */ - static const uint32_t log_block_width = 6; - - return (stride + (1 << log_block_width) - 1) >> log_block_width; -} - -static inline uint32_t -nouveau_get_height_in_blocks(struct nouveau_drm *drm, - uint32_t height, - uint32_t log_block_height_in_gobs) -{ - uint32_t log_gob_height; - uint32_t log_block_height; - - BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - log_gob_height = 2; - else - log_gob_height = 3; - - log_block_height = log_block_height_in_gobs + log_gob_height; - - return (height + (1 << log_block_height) - 1) >> log_block_height; -} - static int nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, uint32_t offset, uint32_t stride, uint32_t h, uint32_t tile_mode) { - uint32_t gob_size, bw, bh; + uint32_t gob_size, bw, bh, gobs_in_block; uint64_t bl_size; BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA); - if (drm->client.device.info.chipset >= 0xc0) { - if (tile_mode & 0xF) - return -EINVAL; - tile_mode >>= 4; - } - - if (tile_mode & 0xFFFFFFF0) + if (nouveau_check_tile_mode(tile_mode, drm->client.device.info.chipset)) return -EINVAL; - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) - gob_size = 256; - else - gob_size = 512; - + gobs_in_block = nouveau_get_gobs_in_block(tile_mode, drm->client.device.info.chipset); bw = nouveau_get_width_in_blocks(stride); - bh = nouveau_get_height_in_blocks(drm, h, tile_mode); + bh = nouveau_get_height_in_blocks(h, gobs_in_block, drm->client.device.info.family); + gob_size = nouveau_get_gob_size(drm->client.device.info.family); - bl_size = bw * bh * (1 << tile_mode) * gob_size; + bl_size = bw * bh * gobs_in_block * gob_size; - DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", - offset, stride, h, tile_mode, bw, bh, gob_size, bl_size, - nvbo->bo.base.size); + DRM_DEBUG_KMS("offset=%u stride=%u h=%u gobs_in_block=%u bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", + offset, stride, h, gobs_in_block, bw, bh, gob_size, + bl_size, nvbo->bo.base.size);
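Worked numbers for the refactored block-linear size computation above, and for the range check that follows, assuming a hypothetical 1920x1080 32-bpp surface on Fermi+ (GOBs are 64 bytes wide and 8 rows tall, so gob_size = 512) with gobs_in_block = 4; a standalone sketch, values illustrative only:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t stride = 1920 * 4, height = 1080;	/* illustrative */
		uint32_t gobs_in_block = 4;	/* decoded from tile_mode */
		uint32_t gob_w = 64, gob_h = 8, gob_size = gob_w * gob_h;

		uint32_t bw = (stride + gob_w - 1) / gob_w;	/* 7680/64 = 120 */
		uint32_t bh = (height + gob_h * gobs_in_block - 1) /
			      (gob_h * gobs_in_block);		/* ceil(1080/32) = 34 */
		uint64_t bl_size = (uint64_t)bw * bh * gobs_in_block * gob_size;

		printf("%llu\n", (unsigned long long)bl_size);	/* 8355840 bytes */
		return 0;
	}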
if (bl_size + offset > nvbo->bo.base.size) return -ERANGE; @@ -804,8 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); - /* Disable console. */ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true); + drm_client_dev_suspend(dev, false); if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { @@ -836,8 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) } } - /* Enable console. */ - drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false); + drm_client_dev_resume(dev, false); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 34985771b2a2..107f63f08bd9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ +#include <linux/aperture.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> @@ -30,7 +31,7 @@ #include <linux/mmu_notifier.h> #include <linux/dynamic_debug.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_ttm_helper.h> @@ -836,6 +837,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, { struct nvkm_device *device; struct nouveau_drm *drm; + const struct drm_format_info *format; int ret; if (vga_switcheroo_client_probe_defer(pdev)) @@ -849,7 +851,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, return ret; /* Remove conflicting drivers (vesafb, efifb etc). */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci); + ret = aperture_remove_conflicting_pci_devices(pdev, driver_pci.name); if (ret) return ret; @@ -873,9 +875,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev, goto fail_pci; if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) - drm_fbdev_ttm_setup(drm->dev, 8); + format = drm_format_info(DRM_FORMAT_C8); else - drm_fbdev_ttm_setup(drm->dev, 32); + format = NULL; + + drm_client_setup(drm->dev, format); quirk_broken_nv_runpm(pdev); return 0; @@ -1318,6 +1322,8 @@ driver_stub = { .dumb_create = nouveau_display_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, + DRM_FBDEV_TTM_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index eb6c3f9a01f5..4412f2711fb5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job) else NV_PRINTK(warn, job->cli, "Generic job timeout.\n"); - drm_sched_start(sched); + drm_sched_start(sched, 0); return stat; } diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index ab4e11dc0b8a..a6c375a24154 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -2,7 +2,7 @@ #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_client_event.h> #include "nouveau_drv.h" #include "nouveau_acpi.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index d1c294f00665..78a83f904bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) mutex_init(&tdev->iommu.mutex); if (device_iommu_mapped(dev)) 
{ - tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); - if (!tdev->iommu.domain) + tdev->iommu.domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(tdev->iommu.domain)) goto error; /* diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 060c74a80eb1..3ea447f6a45b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch, ret = gf100_grctx_generate(gr, chan, fifoch->inst); if (ret) { nvkm_error(&base->engine.subdev, "failed to construct context\n"); + mutex_unlock(&gr->fecs.mutex); return ret; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 819703913a00..2c551bdc9bc9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild @@ -25,7 +25,7 @@ nvkm-y += nvkm/subdev/i2c/busnv50.o nvkm-y += nvkm/subdev/i2c/busgf119.o nvkm-y += nvkm/subdev/i2c/bit.o -nvkm-y += nvkm/subdev/i2c/aux.o +nvkm-y += nvkm/subdev/i2c/auxch.o nvkm-y += nvkm/subdev/i2c/auxg94.o nvkm-y += nvkm/subdev/i2c/auxgf119.o nvkm-y += nvkm/subdev/i2c/auxgm200.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c index dd391809fef7..6c76e5e14b75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c @@ -24,7 +24,7 @@ #define anx9805_pad(p) container_of((p), struct anx9805_pad, base) #define anx9805_bus(p) container_of((p), struct anx9805_bus, base) #define anx9805_aux(p) container_of((p), struct anx9805_aux, base) -#include "aux.h" +#include "auxch.h" #include "bus.h" struct anx9805_pad { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c index d063d0dc13c5..fafc634acbf6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c @@ -24,7 +24,7 @@ #include <linux/string_helpers.h> -#include "aux.h" +#include "auxch.h" #include "pad.h" static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h index f920eabf8628..f920eabf8628 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index 47068f6f9c55..854bb4b5fdb4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ #define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base) -#include "aux.h" +#include "auxch.h" struct g94_i2c_aux { struct nvkm_i2c_aux base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c index dab40cd8fe3a..c17d5647cb99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c @@ -19,7 +19,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "aux.h" +#include "auxch.h" static const struct nvkm_i2c_aux_func gf119_i2c_aux = { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 8bd1d442e465..3c5005e3b330 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ #define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base) -#include "aux.h" +#include "auxch.h" struct gm200_i2c_aux { struct nvkm_i2c_aux base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 731b2f68d3db..7ec17e8435a1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "priv.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" #include "pad.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c index 5904bc5f2d2a..cc26cd677917 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index 3bc4d0310076..1797c6c65979 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" static const struct nvkm_i2c_pad_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c index 7d417f6a816e..5afc1bf8e798 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c @@ -22,7 +22,7 @@ * Authors: Ben Skeggs */ #include "pad.h" -#include "aux.h" +#include "auxch.h" #include "bus.h" static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index a17a6dd8d3de..803b98df4858 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -142,7 +142,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp) return -ENODEV; } - result = min(max(result, (s64)info.min), (s64)info.max); + result = clamp(result, (s64)info.min, (s64)info.max); if (info.link != 0xff) { int ret = nvkm_volt_map(volt, info.link, temp); diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index fbd9af758581..9d4016bd0f44 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -4,6 +4,7 @@ config DRM_OMAP depends on MMU depends on DRM && OF depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB) + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 5f8002f6bb7a..a4ac113e1690 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -139,21 +139,13 @@ static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) } int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct 
omap_dss_device *dst) { - dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "connect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - /* - * The destination is NULL when the source is connected to a - * bridge instead of a DSS device. Stop here, we will attach - * the bridge later when we will have a DRM encoder. - */ - return src && src->bridge ? 0 : -EINVAL; - } + if (!dst) + return -EINVAL; if (omapdss_device_is_connected(dst)) return -EBUSY; @@ -163,19 +155,14 @@ int omapdss_device_connect(struct dss_device *dss, return 0; } -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst) { - struct dss_device *dss = src ? src->dss : dst->dss; - - dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "disconnect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - WARN_ON(!src->bridge); + if (WARN_ON(!dst)) return; - } if (!dst->id && !omapdss_device_is_connected(dst)) { WARN_ON(1); diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 993691b3cc7e..9344855c4887 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -691,11 +691,6 @@ u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, return mgr_desc[channel].sync_lost_irq; } -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc) -{ - return DISPC_IRQ_FRAMEDONEWB; -} - void dispc_mgr_enable(struct dispc_device *dispc, enum omap_channel channel, bool enable) { @@ -726,30 +721,6 @@ void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1); } -bool dispc_wb_go_busy(struct dispc_device *dispc) -{ - return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; -} - -void dispc_wb_go(struct dispc_device *dispc) -{ - enum omap_plane_id plane = OMAP_DSS_WB; - bool enable, go; - - enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1; - - if (!enable) - return; - - go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; - if (go) { - DSSERR("GO bit not down for WB\n"); - return; - } - - REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6); -} - static void dispc_ovl_write_firh_reg(struct dispc_device *dispc, enum omap_plane_id plane, int reg, u32 value) @@ -1498,17 +1469,6 @@ void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc, min(high, 0xfffu)); } -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable) -{ - if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) { - WARN_ON(enable); - return; - } - - DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); - REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 
1 : 0, 14, 14); -} - void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, enum omap_plane_id plane, u32 *fifo_low, u32 *fifo_high, @@ -2814,95 +2774,6 @@ int dispc_ovl_setup(struct dispc_device *dispc, return r; } -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in) -{ - int r; - u32 l; - enum omap_plane_id plane = OMAP_DSS_WB; - const int pos_x = 0, pos_y = 0; - const u8 zorder = 0, global_alpha = 0; - const bool replication = true; - bool truncation; - int in_width = vm->hactive; - int in_height = vm->vactive; - enum omap_overlay_caps caps = - OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - in_height /= 2; - - DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, " - "rot %d\n", wi->paddr, wi->p_uv_addr, in_width, - in_height, wi->width, wi->height, wi->fourcc, wi->rotation); - - r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr, - wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, - wi->height, wi->fourcc, wi->rotation, zorder, - wi->pre_mult_alpha, global_alpha, wi->rotation_type, - replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601, - DRM_COLOR_YCBCR_LIMITED_RANGE); - if (r) - return r; - - switch (wi->fourcc) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_RGB888: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XRGB4444: - truncation = true; - break; - default: - truncation = false; - break; - } - - /* setup extra DISPC_WB_ATTRIBUTES */ - l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane)); - l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ - l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */ - l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ - if (mem_to_mem) - l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ - else - l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ - dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l); - - if (mem_to_mem) { - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0); - } else { - u32 wbdelay; - - if (channel_in == DSS_WB_TV_MGR) - wbdelay = vm->vsync_len + vm->vback_porch; - else - wbdelay = vm->vfront_porch + vm->vsync_len + - vm->vback_porch; - - if (vm->flags & DISPLAY_FLAGS_INTERLACED) - wbdelay /= 2; - - wbdelay = min(wbdelay, 255u); - - /* WBDELAYCOUNT */ - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0); - } - - return 0; -} - -bool dispc_has_writeback(struct dispc_device *dispc) -{ - return dispc->feat->has_writeback; -} - int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable) { @@ -3742,23 +3613,6 @@ void dispc_mgr_set_clock_div(struct dispc_device *dispc, cinfo->pck_div); } -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo) -{ - unsigned long fck; - - fck = dispc_fclk_rate(dispc); - - cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16); - cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0); - - cinfo->lck = fck / cinfo->lck_div; - cinfo->pck = cinfo->lck / cinfo->pck_div; - - return 0; -} - u32 dispc_read_irqstatus(struct dispc_device *dispc) { return dispc_read_reg(dispc, DISPC_IRQSTATUS); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 030f997eccd0..b17e77f700dd 100644 --- 
a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/string.h> @@ -709,7 +710,7 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, if (!dpi) return -ENOMEM; - ep = of_get_next_child(port, NULL); + ep = of_graph_get_next_port_endpoint(port, NULL); if (!ep) return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 4ff02fbc0e71..a8b231ed4f4b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -416,7 +416,6 @@ u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, enum omap_channel channel); u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, enum omap_channel channel); -u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc); u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc); @@ -458,20 +457,11 @@ int dispc_ovl_setup(struct dispc_device *dispc, int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable); -bool dispc_has_writeback(struct dispc_device *dispc); -int dispc_wb_setup(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in); -bool dispc_wb_go_busy(struct dispc_device *dispc); -void dispc_wb_go(struct dispc_device *dispc); - void dispc_enable_sidle(struct dispc_device *dispc); void dispc_disable_sidle(struct dispc_device *dispc); void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable); void dispc_pck_free_enable(struct dispc_device *dispc, bool enable); -void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable); typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck, unsigned long pck, void *data); @@ -494,9 +484,6 @@ void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc, void dispc_mgr_set_clock_div(struct dispc_device *dispc, enum omap_channel channel, const struct dispc_clock_info *cinfo); -int dispc_mgr_get_clock_div(struct dispc_device *dispc, - enum omap_channel channel, - struct dispc_clock_info *cinfo); void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 040d5a3e33d6..4c22c09c93d5 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -242,9 +242,8 @@ struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); void omapdss_device_put(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node); int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct omap_dss_device *dst); -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst); int omap_dss_get_num_overlay_managers(void); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 91eaae3b9481..f9ae358e8e52 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/platform_device.h> #include 
<linux/regulator/consumer.h> #include <linux/string.h> @@ -346,7 +347,7 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, if (!sdi) return -ENOMEM; - ep = of_get_next_child(port, NULL); + ep = of_graph_get_next_port_endpoint(port, NULL); if (!ep) { r = 0; goto err_free; diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 1aca3060333e..fcd600024136 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -119,7 +119,7 @@ static u32 dmm_read_wa(struct dmm *dmm, u32 reg) * earlier than the DMA finished writing the value to memory. */ rmb(); - return readl(dmm->wa_dma_data); + return readl((__iomem void *)dmm->wa_dma_data); } static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg) @@ -127,7 +127,7 @@ static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg) dma_addr_t src, dst; int r; - writel(val, dmm->wa_dma_data); + writel(val, (__iomem void *)dmm->wa_dma_data); /* * As per i878 workaround, the DMA is used to access the DMM registers. * Make sure that the writel is not moved by the compiler or the CPU, so @@ -411,7 +411,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) */ /* read back to ensure the data is in RAM */ - readl(&txn->last_pat->next_pa); + readl((__iomem void *)&txn->last_pat->next_pa); /* write to PAT_DESCR to clear out any pending transaction */ dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d3eac4817d76..1796cd20a877 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -307,7 +307,7 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; - omapdss_device_disconnect(NULL, pipe->output); + omapdss_device_disconnect(priv->dss, pipe->output); omapdss_device_put(pipe->output); pipe->output = NULL; @@ -325,7 +325,7 @@ static int omap_connect_pipelines(struct drm_device *ddev) int r; for_each_dss_output(output) { - r = omapdss_device_connect(priv->dss, NULL, output); + r = omapdss_device_connect(priv->dss, output); if (r == -EPROBE_DEFER) { omapdss_device_put(output); return r; @@ -647,6 +647,7 @@ static const struct drm_driver omap_drm_driver = { .gem_prime_import = omap_gem_prime_import, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, + OMAP_FBDEV_DRIVER_OPS, .ioctls = ioctls, .num_ioctls = DRM_OMAP_NUM_IOCTLS, .fops = &omapdriver_fops, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 4c7217b35f6b..d903568fd8cc 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -32,6 +32,7 @@ #define MODULE_NAME "omapdrm" struct omap_drm_usergart; +struct omap_fbdev; struct omap_drm_pipeline { struct drm_crtc *crtc; @@ -97,6 +98,8 @@ struct omap_drm_private { /* memory bandwidth limit if it is needed on the platform */ unsigned int max_bandwidth; + + struct omap_fbdev *fbdev; }; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 523be34682ca..f4bd0c6e3f34 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -6,6 +6,7 @@ #include <linux/fb.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> @@ -13,6 +14,7 @@ #include <drm/drm_fourcc.h> #include 
<drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_util.h> #include "omap_drv.h" @@ -26,10 +28,8 @@ module_param_named(ywrap, ywrap_enabled, bool, 0644); * fbdev funcs, to implement legacy fbdev interface on top of drm driver */ -#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base) - struct omap_fbdev { - struct drm_fb_helper base; + struct drm_device *dev; bool ywrap_enabled; /* for deferred dmm roll when getting called in atomic ctx */ @@ -41,7 +41,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi); static void pan_worker(struct work_struct *work) { struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); - struct drm_fb_helper *helper = &fbdev->base; + struct drm_fb_helper *helper = fbdev->dev->fb_helper; struct fb_info *fbi = helper->info; struct drm_gem_object *bo = drm_gem_fb_get_obj(helper->fb, 0); int npages; @@ -55,24 +55,25 @@ FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev, drm_fb_helper_damage_range, drm_fb_helper_damage_area) -static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, - struct fb_info *fbi) +static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { struct drm_fb_helper *helper = get_fb(fbi); - struct omap_fbdev *fbdev = to_omap_fbdev(helper); + struct omap_drm_private *priv; + struct omap_fbdev *fbdev; if (!helper) goto fallback; + priv = helper->dev->dev_private; + fbdev = priv->fbdev; + if (!fbdev->ywrap_enabled) goto fallback; - if (drm_can_sleep()) { + if (drm_can_sleep()) pan_worker(&fbdev->work); - } else { - struct omap_drm_private *priv = helper->dev->dev_private; + else queue_work(priv->wq, &fbdev->work); - } return 0; @@ -92,7 +93,6 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) struct drm_fb_helper *helper = info->par; struct drm_framebuffer *fb = helper->fb; struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0); - struct omap_fbdev *fbdev = to_omap_fbdev(helper); DBG(); @@ -104,7 +104,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) drm_client_release(&helper->client); drm_fb_helper_unprepare(helper); - kfree(fbdev); + kfree(helper); } /* @@ -125,12 +125,36 @@ static const struct fb_ops omap_fb_ops = { .fb_destroy = omap_fbdev_fb_destroy, }; -static int omap_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) +{ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) + return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + + return 0; +} + +static const struct drm_fb_helper_funcs omap_fbdev_helper_funcs = { + .fb_dirty = omap_fbdev_dirty, +}; + +static struct drm_fb_helper *get_fb(struct fb_info *fbi) +{ + if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) { + /* these are not the fb's you're looking for */ + return NULL; + } + return fbi->par; +} + +int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { - struct omap_fbdev *fbdev = to_omap_fbdev(helper); struct drm_device *dev = helper->dev; struct omap_drm_private *priv = dev->dev_private; + struct omap_fbdev *fbdev = priv->fbdev; struct drm_framebuffer *fb = NULL; union omap_gem_size gsize; struct fb_info *fbi = NULL; @@ -208,6 +232,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, DBG("fbi=%p, dev=%p", fbi, dev); + helper->funcs = &omap_fbdev_helper_funcs; helper->fb = fb; fbi->fbops = &omap_fb_ops;
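The remaining omap_fbdev.c hunks below drop the driver's hand-rolled drm_client registration in favour of the shared client library: the driver exports a .fbdev_probe callback through the OMAP_FBDEV_DRIVER_OPS macro and makes a single drm_client_setup() call after registration. A condensed sketch of the resulting wiring (kernel-style fragment assembled from the hunks in this patch; foo_driver and foo_load are hypothetical names, and the snippet is not compilable on its own):

	#include <drm/drm_client_setup.h>
	#include <drm/drm_drv.h>

	/* The fbdev client calls back into the driver to build the emulated
	 * framebuffer; everything else lives in drm_client_lib.
	 */
	static const struct drm_driver foo_driver = {
		/* ... fops, GEM ops ... */
		OMAP_FBDEV_DRIVER_OPS,	/* .fbdev_probe = omap_fbdev_driver_fbdev_probe */
	};

	static int foo_load(struct drm_device *ddev)
	{
		int ret = drm_dev_register(ddev, 0);

		if (ret)
			return ret;
		/* NULL format: let the client pick the driver's preferred one */
		drm_client_setup(ddev, NULL);
		return 0;
	}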
@@ -254,115 +279,21 @@ fail: return ret; } -static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) -{ - if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) - return 0; - - if (helper->fb->funcs->dirty) - return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); - - return 0; -} - -static const struct drm_fb_helper_funcs omap_fb_helper_funcs = { - .fb_probe = omap_fbdev_create, - .fb_dirty = omap_fbdev_dirty, -}; - -static struct drm_fb_helper *get_fb(struct fb_info *fbi) -{ - if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) { - /* these are not the fb's you're looking for */ - return NULL; - } - return fbi->par; -} - -/* - * struct drm_client - */ - -static void omap_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int omap_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int omap_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs omap_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = omap_fbdev_client_unregister, - .restore = omap_fbdev_client_restore, - .hotplug = omap_fbdev_client_hotplug, -}; - void omap_fbdev_setup(struct drm_device *dev) { + struct omap_drm_private *priv = dev->dev_private; struct omap_fbdev *fbdev; - struct drm_fb_helper *helper; - int ret; drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); + fbdev = drmm_kzalloc(dev, sizeof(*fbdev), GFP_KERNEL); if (!fbdev) return; - helper = &fbdev->base; - - drm_fb_helper_prepare(dev, helper, 32, &omap_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &omap_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - + fbdev->dev = dev; INIT_WORK(&fbdev->work, pan_worker); - drm_client_register(&helper->client); + priv->fbdev = fbdev; - return; - -err_drm_client_init: - drm_fb_helper_unprepare(helper); - kfree(fbdev); + drm_client_setup(dev, NULL); } diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h index 74c691a8d45f..283e35b42ada 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.h +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h @@ -10,10 +10,18 @@ #define __OMAPDRM_FBDEV_H__ struct drm_device; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION +int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define OMAP_FBDEV_DRIVER_OPS \ + .fbdev_probe = omap_fbdev_driver_fbdev_probe void omap_fbdev_setup(struct drm_device *dev); #else +#define OMAP_FBDEV_DRIVER_OPS \ + 
.fbdev_probe = NULL static inline void omap_fbdev_setup(struct drm_device *dev) { } diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index fdae677558f3..b9c67e4ca360 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1402,8 +1402,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj = to_omap_bo(obj); - mutex_lock(&omap_obj->lock); - omap_obj->sgt = sgt; if (omap_gem_sgt_is_contiguous(sgt, size)) { @@ -1418,21 +1416,17 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); if (!pages) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } omap_obj->pages = pages; ret = drm_prime_sg_to_page_array(sgt, pages, npages); if (ret) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } } -done: - mutex_unlock(&omap_obj->lock); return obj; } diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index d3a9a9fafe4e..d7469c565d1d 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -378,7 +378,7 @@ config DRM_PANEL_LG_SW43408 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for LG sw43408 panel. @@ -587,7 +587,7 @@ config DRM_PANEL_RAYDIUM_RM692E5 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Raydium RM692E5-based @@ -614,6 +614,34 @@ config DRM_PANEL_RONBO_RB070D30 Say Y here if you want to enable support for Ronbo Electronics RB070D30 1024x600 DSI panel. +config DRM_PANEL_SAMSUNG_AMS581VF01 + tristate "Samsung AMS581VF01 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y or M here if you want to enable support for the + Samsung AMS581VF01 FHD Plus (2340x1080@60Hz) CMD mode panel. + +config DRM_PANEL_SAMSUNG_AMS639RQ08 + tristate "Samsung AMS639RQ08 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y or M here if you want to enable support for the + Samsung AMS639RQ08 FHD Plus (2340x1080@60Hz) CMD mode panel. + +config DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24 + tristate "Samsung AMS427AP24 panel with S6E88A0 controller" + depends on GPIOLIB && OF && REGULATOR + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Samsung AMS427AP24 panel + with S6E88A0 controller (found in Samsung Galaxy S4 Mini Value Edition + GT-I9195I). To compile this driver as a module, choose M here. 
+ config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller" depends on OF @@ -689,6 +717,14 @@ config DRM_PANEL_SAMSUNG_S6E3HA2 depends on BACKLIGHT_CLASS_DEVICE select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6E3HA8 + tristate "Samsung S6E3HA8 DSI video mode panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DSC_HELPER + select VIDEOMODE_HELPERS + config DRM_PANEL_SAMSUNG_S6E63J0X03 tristate "Samsung S6E63J0X03 DSI command mode panel" depends on OF @@ -946,7 +982,7 @@ config DRM_PANEL_VISIONOX_R66451 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Visionox diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 987a08702410..7dcf72646cac 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -62,6 +62,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o @@ -70,10 +72,12 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA8) += panel-samsung-s6e3ha8.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 767e47a2b0c1..8566e9cf2f82 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1977,11 +1977,13 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"), EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"), + EDP_PANEL_ENTRY('L', 'G', 'D', 0x0778, &delay_200_500_e200_d200, "134WT1"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + EDP_PANEL_ENTRY('S', 'H', 
'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index 00791ea81e90..b904d5437444 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -50,55 +50,44 @@ static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel) return container_of(panel, struct kd35t133, panel); } -static int kd35t133_init_sequence(struct kd35t133 *ctx) +static void kd35t133_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - struct device *dev = ctx->dev; - /* * Init sequence was supplied by the panel vendor with minimal * documentation. */ - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA, - 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56, - 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA, - 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34, - 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL, - 0x20, 0x02); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00); - mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3, - 0xa9, 0x51, 0x2c, 0x82); - mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0); - - dev_dbg(dev, "Panel init sequence done\n"); - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POSITIVEGAMMA, + 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56, + 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_NEGATIVEGAMMA, + 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34, + 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL2, 0x41); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x48); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_INTERFACEMODECTRL, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_FRAMERATECTRL, 0xa0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYFUNCTIONCTRL, + 0x20, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_SETIMAGEFUNCTION, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_ADJUSTCONTROL3, + 0xa9, 0x51, 0x2c, 0x82); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_ENTER_INVERT_MODE); }
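The kd35t133 conversion continuing below, and the Himax hx83112a one after it, both move to the multi-context DSI helpers: a struct mipi_dsi_multi_context carries the DSI device plus an accumulated error, every *_multi() call (and mipi_dsi_msleep()) becomes a no-op once an error has been recorded, and the function checks accum_err once at the end instead of after every DCS write. The shape of the pattern, as a short sketch (hypothetical panel function, same helpers as used in the hunks here):

	#include <drm/drm_mipi_dsi.h>

	static int example_panel_enable(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* Each call below is skipped internally once accum_err is set. */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		return dsi_ctx.accum_err;	/* 0 on success, first error otherwise */
	}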
static int kd35t133_unprepare(struct drm_panel *panel) { struct kd35t133 *ctx = panel_to_kd35t133(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) - dev_err(ctx->dev, "failed to set display off: %d\n", ret); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; gpiod_set_value_cansleep(ctx->reset_gpio, 1); @@ -112,18 +101,20 @@ static int kd35t133_prepare(struct drm_panel *panel) { struct kd35t133 *ctx = panel_to_kd35t133(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dev_dbg(ctx->dev, "Resetting the panel\n"); - ret = regulator_enable(ctx->vdd); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret); - return ret; + dsi_ctx.accum_err = regulator_enable(ctx->vdd); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", + dsi_ctx.accum_err); + return dsi_ctx.accum_err; } - ret = regulator_enable(ctx->iovcc); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); + dsi_ctx.accum_err = regulator_enable(ctx->iovcc); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", + dsi_ctx.accum_err); goto disable_vdd; } @@ -135,27 +126,18 @@ static int kd35t133_prepare(struct drm_panel *panel) msleep(20); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - goto disable_iovcc; - } + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 250); - msleep(250); + kd35t133_init_sequence(&dsi_ctx); + if (!dsi_ctx.accum_err) + dev_dbg(ctx->dev, "Panel init sequence done\n"); - ret = kd35t133_init_sequence(ctx); - if (ret < 0) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - goto disable_iovcc; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + if (dsi_ctx.accum_err) goto disable_iovcc; - } - - msleep(50); return 0; @@ -163,7 +145,7 @@ disable_iovcc: regulator_disable(ctx->iovcc); disable_vdd: regulator_disable(ctx->vdd); - return ret; + return dsi_ctx.accum_err; } static const struct drm_display_mode default_mode = { diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c index 466c27012abf..47bce087e339 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c @@ -56,198 +56,173 @@ static void hx83112a_reset(struct hx83112a_panel *ctx) msleep(50); } -static int hx83112a_on(struct hx83112a_panel *ctx) +static int hx83112a_on(struct mipi_dsi_device *dsi) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1, - 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP, - 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19, - 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, - 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0, - 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff, - 0x00, 0x00, 
0x14, 0x15, 0x00, 0x29, 0x11, 0x07, - 0x12, 0x00, 0x29); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV, - 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00, - 0x53); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, - 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, - 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, - 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, - 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, - 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, - 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, - 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, - 0x40); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON, - 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, - 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07, - 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10, - 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31, - 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00, - 0x0f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0, - 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, - 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03, - 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31, - 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, - 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, - 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00, - 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31, - 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, - 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 
0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e, - 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa, - 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff, - 0xff, 0xff, 0xff, 0xff); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3, - 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50, - 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05, - 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1, - 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6); - mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37); - mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f); - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(150); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(50); - - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETEXTC, 0x83, 0x11, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER1, + 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDISP, + 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19, + 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV, + 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0, + 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff, + 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07, + 0x12, 0x00, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV, + 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00, + 0x53); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 
0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, + 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6, + 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6, + 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d, + 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49, + 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a, + 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3, + 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad, + 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTCON, + 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPANEL, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER2, 0x2b, 0x2b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, + 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07, + 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10, + 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00, + 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0, + 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP1, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03, + 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP2, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, + 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f, + 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00, + 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, + 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e, + 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff, + 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3, + 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50, + 0x20, 
0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05, + 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1, + 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETCLOCK, 0xd1, 0xd6); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc6); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPTBA, 0x37); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 150); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); + + return dsi_ctx.accum_err; } static int hx83112a_disable(struct drm_panel *panel) { struct hx83112a_panel *ctx = to_hx83112a_panel(panel); struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(20); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - return 0; + return dsi_ctx.accum_err; } static int hx83112a_prepare(struct drm_panel *panel) { struct hx83112a_panel *ctx = to_hx83112a_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } hx83112a_reset(ctx); - ret = hx83112a_on(ctx); + ret = hx83112a_on(ctx->dsi); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - return ret; } - return 0; + return ret; } static int hx83112a_unprepare(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 4a6dcfd781e8..94b7dfef3b5e 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -318,7 +318,7 @@ static int ili9322_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, buf, 1, val, 1); } -static struct regmap_bus ili9322_regmap_bus = { +static const struct regmap_bus ili9322_regmap_bus = { .write = ili9322_regmap_spi_write, .read = ili9322_regmap_spi_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 1fbc5d433d75..ff39f5dd4097 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ 
b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -13,9 +13,6 @@ * Derived from drivers/drm/gpu/panel/panel-ilitek-ili9322.c * the reuse of DBI abstraction part referred from Linus's patch * "drm/panel: s6e63m0: Switch to DBI abstraction for SPI" - * - * For only-dbi part, copy from David's code (drm/tiny/ili9341.c) - * Copyright 2018 David Lechner <david@lechnology.com> */ #include <linux/backlight.h> @@ -486,176 +483,6 @@ static const struct drm_panel_funcs ili9341_dpi_funcs = { .get_modes = ili9341_dpi_get_modes, }; -static void ili9341_dbi_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) -{ - struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); - struct mipi_dbi *dbi = &dbidev->dbi; - u8 addr_mode; - int ret, idx; - - if (!drm_dev_enter(pipe->crtc.dev, &idx)) - return; - - ret = mipi_dbi_poweron_conditional_reset(dbidev); - if (ret < 0) - goto out_exit; - if (ret == 1) - goto out_enable; - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); - - mipi_dbi_command(dbi, ILI9341_POWERB, 0x00, 0xc1, 0x30); - mipi_dbi_command(dbi, ILI9341_POWER_SEQ, 0x64, 0x03, 0x12, 0x81); - mipi_dbi_command(dbi, ILI9341_DTCA, 0x85, 0x00, 0x78); - mipi_dbi_command(dbi, ILI9341_POWERA, 0x39, 0x2c, 0x00, 0x34, 0x02); - mipi_dbi_command(dbi, ILI9341_PRC, ILI9341_DBI_PRC_NORMAL); - mipi_dbi_command(dbi, ILI9341_DTCB, 0x00, 0x00); - - /* Power Control */ - mipi_dbi_command(dbi, ILI9341_POWER1, ILI9341_DBI_VCOMH_4P6V); - mipi_dbi_command(dbi, ILI9341_POWER2, ILI9341_DBI_PWR_2_DEFAULT); - /* VCOM */ - mipi_dbi_command(dbi, ILI9341_VCOM1, ILI9341_DBI_VCOM_1_VMH_4P25V, - ILI9341_DBI_VCOM_1_VML_1P5V); - mipi_dbi_command(dbi, ILI9341_VCOM2, ILI9341_DBI_VCOM_2_DEC_58); - - /* Memory Access Control */ - mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, - MIPI_DCS_PIXEL_FMT_16BIT); - - /* Frame Rate */ - mipi_dbi_command(dbi, ILI9341_FRC, ILI9341_DBI_FRC_DIVA & 0x03, - ILI9341_DBI_FRC_RTNA & 0x1f); - - /* Gamma */ - mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, ILI9341_GAMMA_CURVE_1); - mipi_dbi_command(dbi, ILI9341_PGAMMA, - 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1, - 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00); - mipi_dbi_command(dbi, ILI9341_NGAMMA, - 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1, - 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f); - - /* DDRAM */ - mipi_dbi_command(dbi, ILI9341_ETMOD, ILI9341_DBI_EMS_GAS | - ILI9341_DBI_EMS_DTS | - ILI9341_DBI_EMS_GON); - - /* Display */ - mipi_dbi_command(dbi, ILI9341_DFC, 0x08, 0x82, 0x27, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); - msleep(100); - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); - msleep(100); - -out_enable: - switch (dbidev->rotation) { - default: - addr_mode = ILI9341_MADCTL_MX; - break; - case 90: - addr_mode = ILI9341_MADCTL_MV; - break; - case 180: - addr_mode = ILI9341_MADCTL_MY; - break; - case 270: - addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | - ILI9341_MADCTL_MX; - break; - } - - addr_mode |= ILI9341_MADCTL_BGR; - mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); - mipi_dbi_enable_flush(dbidev, crtc_state, plane_state); - drm_info(&dbidev->drm, "Initialized display serial interface\n"); -out_exit: - drm_dev_exit(idx); -} - -static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = { - DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable), -}; - -static const struct drm_display_mode ili9341_dbi_mode = { - DRM_SIMPLE_MODE(240, 320, 37, 49), -}; - 
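[Editor's note] The DBI enable path being deleted above is built on the mipi_dbi power-on idiom: mipi_dbi_poweron_conditional_reset() returns a negative error code on failure, 0 after a real reset (the init commands must then be sent), and 1 when the controller kept its state (init can be skipped). A minimal sketch of that idiom — example_dbi_enable() and the trimmed command list are hypothetical; it assumes <drm/drm_mipi_dbi.h>, <video/mipi_display.h> and <linux/delay.h>:

	static void example_dbi_enable(struct mipi_dbi_dev *dbidev)
	{
		struct mipi_dbi *dbi = &dbidev->dbi;
		int ret;

		ret = mipi_dbi_poweron_conditional_reset(dbidev);
		if (ret < 0)			/* reset or regulator failure */
			return;
		if (ret == 1)			/* state retained, skip init */
			goto out_enable;

		mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
		msleep(100);
		mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);

	out_enable:
		/* address-mode setup and the first framebuffer flush follow */
		;
	}
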
-DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops); - -static struct drm_driver ili9341_dbi_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .fops = &ili9341_dbi_fops, - DRM_GEM_DMA_DRIVER_OPS_VMAP, - .debugfs_init = mipi_dbi_debugfs_init, - .name = "ili9341", - .desc = "Ilitek ILI9341", - .date = "20210716", - .major = 1, - .minor = 0, -}; - -static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, - struct gpio_desc *reset) -{ - struct device *dev = &spi->dev; - struct mipi_dbi_dev *dbidev; - struct mipi_dbi *dbi; - struct drm_device *drm; - struct regulator *vcc; - u32 rotation = 0; - int ret; - - vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) { - dev_err(dev, "get optional vcc failed\n"); - vcc = NULL; - } - - dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, - struct mipi_dbi_dev, drm); - if (IS_ERR(dbidev)) - return PTR_ERR(dbidev); - - dbi = &dbidev->dbi; - drm = &dbidev->drm; - dbi->reset = reset; - dbidev->regulator = vcc; - - drm_mode_config_init(drm); - - dbidev->backlight = devm_of_find_backlight(dev); - if (IS_ERR(dbidev->backlight)) - return PTR_ERR(dbidev->backlight); - - device_property_read_u32(dev, "rotation", &rotation); - - ret = mipi_dbi_spi_init(spi, dbi, dc); - if (ret) - return ret; - - ret = mipi_dbi_dev_init(dbidev, &ili9341_dbi_funcs, - &ili9341_dbi_mode, rotation); - if (ret) - return ret; - - drm_mode_config_reset(drm); - - ret = drm_dev_register(drm, 0); - if (ret) - return ret; - - spi_set_drvdata(spi, drm); - - drm_fbdev_dma_setup(drm, 0); - - return 0; -} - static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc, struct gpio_desc *reset) { @@ -711,7 +538,6 @@ static int ili9341_probe(struct spi_device *spi) struct device *dev = &spi->dev; struct gpio_desc *dc; struct gpio_desc *reset; - const struct spi_device_id *id = spi_get_device_id(spi); reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset)) @@ -721,36 +547,15 @@ static int ili9341_probe(struct spi_device *spi) if (IS_ERR(dc)) return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n"); - if (!strcmp(id->name, "sf-tc240t-9370-t")) - return ili9341_dpi_probe(spi, dc, reset); - - if (!strcmp(id->name, "yx240qv29")) - return ili9341_dbi_probe(spi, dc, reset); - - return -ENODEV; + return ili9341_dpi_probe(spi, dc, reset); } static void ili9341_remove(struct spi_device *spi) { - const struct spi_device_id *id = spi_get_device_id(spi); struct ili9341 *ili = spi_get_drvdata(spi); - struct drm_device *drm = spi_get_drvdata(spi); - - if (!strcmp(id->name, "sf-tc240t-9370-t")) { - ili9341_dpi_power_off(ili); - drm_panel_remove(&ili->panel); - } else if (!strcmp(id->name, "yx240qv29")) { - drm_dev_unplug(drm); - drm_atomic_helper_shutdown(drm); - } -} -static void ili9341_shutdown(struct spi_device *spi) -{ - const struct spi_device_id *id = spi_get_device_id(spi); - - if (!strcmp(id->name, "yx240qv29")) - drm_atomic_helper_shutdown(spi_get_drvdata(spi)); + ili9341_dpi_power_off(ili); + drm_panel_remove(&ili->panel); } static const struct of_device_id ili9341_of_match[] = { @@ -758,19 +563,11 @@ static const struct of_device_id ili9341_of_match[] = { .compatible = "st,sf-tc240t-9370-t", .data = &ili9341_stm32f429_disco_data, }, - { - /* porting from tiny/ili9341.c - * for original mipi dbi compitable - */ - .compatible = "adafruit,yx240qv29", - .data = NULL, - }, { } }; MODULE_DEVICE_TABLE(of, ili9341_of_match); static const struct spi_device_id ili9341_id[] = { - { "yx240qv29", 0 }, { 
"sf-tc240t-9370-t", 0 }, { } }; @@ -779,7 +576,6 @@ MODULE_DEVICE_TABLE(spi, ili9341_id); static struct spi_driver ili9341_driver = { .probe = ili9341_probe, .remove = ili9341_remove, - .shutdown = ili9341_shutdown, .id_table = ili9341_id, .driver = { .name = "panel-ilitek-ili9341", diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 084c37fa7348..28cd7560e5db 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -42,6 +42,7 @@ struct ili9881c_desc { const size_t init_length; const struct drm_display_mode *mode; const unsigned long mode_flags; + u8 default_address_mode; }; struct ili9881c { @@ -53,6 +54,7 @@ struct ili9881c { struct gpio_desc *reset; enum drm_panel_orientation orientation; + u8 address_mode; }; #define ILI9881C_SWITCH_PAGE_INSTR(_page) \ @@ -815,8 +817,6 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0xd1, 0x4b), ILI9881C_COMMAND_INSTR(0xd2, 0x60), ILI9881C_COMMAND_INSTR(0xd3, 0x39), - ILI9881C_SWITCH_PAGE_INSTR(0), - ILI9881C_COMMAND_INSTR(0x36, 0x03), }; static const struct ili9881c_instr w552946ab_init[] = { @@ -1299,6 +1299,14 @@ static int ili9881c_prepare(struct drm_panel *panel) if (ret) return ret; + if (ctx->address_mode) { + ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE, + &ctx->address_mode, + sizeof(ctx->address_mode)); + if (ret < 0) + return ret; + } + ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret) return ret; @@ -1463,6 +1471,10 @@ static int ili9881c_get_modes(struct drm_panel *panel, connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; + if (ctx->address_mode == 0x3) + connector->display_info.subpixel_order = SubPixelHorizontalBGR; + else + connector->display_info.subpixel_order = SubPixelHorizontalRGB; /* * TODO: Remove once all drm drivers call @@ -1521,6 +1533,12 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) return ret; } + ctx->address_mode = ctx->desc->default_address_mode; + if (ctx->orientation == DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP) { + ctx->address_mode ^= 0x03; + ctx->orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; + } + ctx->panel.prepare_prev_first = true; ret = drm_panel_of_backlight(&ctx->panel); @@ -1572,6 +1590,7 @@ static const struct ili9881c_desc tl050hdv35_desc = { .mode = &tl050hdv35_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_LPM, + .default_address_mode = 0x03, }; static const struct ili9881c_desc w552946aba_desc = { diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 44897e5218a6..45d09e6fa667 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -26,7 +26,6 @@ struct jadard_panel_desc { unsigned int lanes; enum mipi_dsi_pixel_format format; int (*init)(struct jadard *jadard); - u32 num_init_cmds; bool lp11_before_reset; bool reset_before_power_off_vcioo; unsigned int vcioo_to_lp11_delay_ms; diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c index 14932cb3defc..0e5e8e57bd1e 100644 --- a/drivers/gpu/drm/panel/panel-khadas-ts050.c +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -617,12 +617,12 @@ static const struct khadas_ts050_panel_cmd ts050_init_code[] = { {0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */ }; 
-struct khadas_ts050_panel_data ts050_panel_data = { +static struct khadas_ts050_panel_data ts050_panel_data = { .init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code, .len = ARRAY_SIZE(ts050_init_code) }; -struct khadas_ts050_panel_data ts050v2_panel_data = { +static struct khadas_ts050_panel_data ts050v2_panel_data = { .init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code, .len = ARRAY_SIZE(ts050v2_init_code) }; diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index 292aa26a456d..77f74e6c467e 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -26,7 +26,7 @@ struct ltk050h3146w; struct ltk050h3146w_desc { const unsigned long mode_flags; const struct drm_display_mode *mode; - int (*init)(struct ltk050h3146w *ctx); + void (*init)(struct mipi_dsi_multi_context *dsi_ctx); }; struct ltk050h3146w { @@ -243,67 +243,57 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) return container_of(panel, struct ltk050h3146w, panel); } -static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3148w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, - 0x71, 0x31, 0x55, 0x2f); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, - 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, - 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, - 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, - 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, - 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, - 0x17, 0x11, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, - 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, - 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, - 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, - 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, - 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, - 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, - 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, - 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, - 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, - 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, - 0x5d, 0x61, 0x65, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02); - - ret = 
mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, + 0x71, 0x31, 0x55, 0x2f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x88); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, + 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, + 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, + 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, + 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, + 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, + 0x17, 0x11, 0x40); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, + 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, + 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, + 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, + 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, + 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, + 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, + 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, + 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, + 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, + 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, + 0x5d, 0x61, 0x65, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcc, 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x1f, 0x31); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0xc4, 0xc4); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc6, 0xef); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x02); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3148w_mode = { @@ -327,74 +317,64 @@ static const struct ltk050h3146w_desc ltk050h3148w_data = { MIPI_DSI_MODE_VIDEO_BURST, }; -static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. 
*/ - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, - 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); - - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, - 0x28, 0x04, 0xcc, 0xcc, 0xcc); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); - mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, - 0x80); - mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, - 0x16, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, - 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, - 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, - 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, - 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, - 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, - 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, - 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, - 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, - 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, - 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, - 0x21, 0x00, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); - mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00); - - ret = mipi_dsi_dcs_set_tear_on(dsi, 1); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdf, 0x93, 0x65, 0xf8); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, + 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x00, 0xb5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); + + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0x00, 0xc4, 0x23, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, + 0x28, 0x04, 0xcc, 0xcc, 0xcc); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x0f, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbe, 0x1e, 0xf2); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x26, 0x03); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x00, 0x12); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc3, 
0x04, 0x02, 0x02, 0x76, 0x01, 0x80, + 0x80); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, + 0x16, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, + 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, + 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, + 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, + 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, + 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, + 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, + 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, + 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, + 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, + 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05, + 0x21, 0x00, 0x60); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdd, 0x2c, 0xa3, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x32, 0x1c); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x3b, 0x70, 0x00, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x11); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc2, 0x20, 0x38, 0x1e, 0x84); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x00); + + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1); + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_mode = { @@ -418,79 +398,42 @@ static const struct ltk050h3146w_desc ltk050h3146w_data = { MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; -static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) +static void ltk050h3146w_a2_select_page(struct mipi_dsi_multi_context *dsi_ctx, int page) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - u8 d[3] = { 0x98, 0x81, page }; + u8 d[4] = { 0xff, 0x98, 0x81, page }; - return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); + mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d, ARRAY_SIZE(d)); } -static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, +static void ltk050h3146w_a2_write_page(struct mipi_dsi_multi_context *dsi_ctx, int page, const struct ltk050h3146w_cmd *cmds, int num) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int i, ret; + ltk050h3146w_a2_select_page(dsi_ctx, page); - ret = ltk050h3146w_a2_select_page(ctx, page); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret); - return ret; - } - - for (i = 0; i < num; i++) { - ret = mipi_dsi_generic_write(dsi, &cmds[i], + for (int i = 0; i < num; i++) + mipi_dsi_generic_write_multi(dsi_ctx, &cmds[i], sizeof(struct ltk050h3146w_cmd)); - if (ret < 0) { - dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret); - return ret; - } - } - - return 0; } -static int 
ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) +static void ltk050h3146w_a2_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - /* * Init sequence was supplied by the panel vendor without much * documentation. */ - ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 3, page3_cmds, ARRAY_SIZE(page3_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 4, page4_cmds, ARRAY_SIZE(page4_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, + ltk050h3146w_a2_write_page(dsi_ctx, 1, page1_cmds, ARRAY_SIZE(page1_cmds)); - if (ret < 0) - return ret; - - ret = ltk050h3146w_a2_select_page(ctx, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to select page 0: %d\n", ret); - return ret; - } + ltk050h3146w_a2_select_page(dsi_ctx, 0); /* vendor code called this without param, where there should be one */ - ret = mipi_dsi_dcs_set_tear_on(dsi, 0); - if (ret < 0) { - dev_err(ctx->dev, "failed to set tear on: %d\n", ret); - return ret; - } - - msleep(60); + mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 0); - return 0; + mipi_dsi_msleep(dsi_ctx, 60); } static const struct drm_display_mode ltk050h3146w_a2_mode = { @@ -518,19 +461,12 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to set display off: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; regulator_disable(ctx->iovcc); regulator_disable(ctx->vci); @@ -542,17 +478,17 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dev_dbg(ctx->dev, "Resetting the panel\n"); - ret = regulator_enable(ctx->vci); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret); - return ret; + dsi_ctx.accum_err = regulator_enable(ctx->vci); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable vci supply: %d\n", dsi_ctx.accum_err); + return dsi_ctx.accum_err; } - ret = regulator_enable(ctx->iovcc); - if (ret < 0) { - dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); + dsi_ctx.accum_err = regulator_enable(ctx->iovcc); + if (dsi_ctx.accum_err) { + dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", dsi_ctx.accum_err); goto disable_vci; } @@ -561,28 +497,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(ctx->reset_gpio, 0); msleep(20); - ret = ctx->panel_desc->init(ctx); - if (ret < 0) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - goto disable_iovcc; - } - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - goto disable_iovcc; - } - + ctx->panel_desc->init(&dsi_ctx); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); /* T9: 120ms 
*/ - msleep(120); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + if (dsi_ctx.accum_err) goto disable_iovcc; - } - - msleep(50); return 0; @@ -590,7 +513,7 @@ disable_iovcc: regulator_disable(ctx->iovcc); disable_vci: regulator_disable(ctx->vci); - return ret; + return dsi_ctx.accum_err; } static int ltk050h3146w_get_modes(struct drm_panel *panel, diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c index d3baccfe6286..06e16a7c14a7 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c @@ -917,7 +917,7 @@ static const struct nv3052c_panel_info wl_355608_a8_panel_info = { static const struct spi_device_id nv3052c_ids[] = { { "ltk035c5444t", }, { "fs035vg158", }, - { "wl-355608-a8", }, + { "rg35xx-plus-panel", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, nv3052c_ids); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index 57686340de49..549b86f2cc28 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -38,6 +38,7 @@ #define NT35510_CMD_CORRECT_GAMMA BIT(0) #define NT35510_CMD_CONTROL_DISPLAY BIT(1) +#define NT35510_CMD_SETVCMOFF BIT(2) #define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */ #define MCS_CMD_READ_ID1 0xDA @@ -721,11 +722,13 @@ static int nt35510_setup_power(struct nt35510 *nt) if (ret) return ret; - ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF, - NT35510_P1_VCMOFF_LEN, - nt->conf->vcmoff); - if (ret) - return ret; + if (nt->conf->cmds & NT35510_CMD_SETVCMOFF) { + ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF, + NT35510_P1_VCMOFF_LEN, + nt->conf->vcmoff); + if (ret) + return ret; + } /* Typically 10 ms */ usleep_range(10000, 20000); @@ -1319,7 +1322,7 @@ static const struct nt35510_config nt35510_frida_frd400b25025 = { }, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM, - .cmds = NT35510_CMD_CONTROL_DISPLAY, + .cmds = NT35510_CMD_CONTROL_DISPLAY | NT35510_CMD_SETVCMOFF, /* 0x03: AVDD = 6.2V */ .avdd = { 0x03, 0x03, 0x03 }, /* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */ diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c index 18bd2ee71201..04f1d2676c78 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c @@ -1095,18 +1095,6 @@ static int nt36523_unprepare(struct drm_panel *panel) static void nt36523_remove(struct mipi_dsi_device *dsi) { struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = mipi_dsi_detach(pinfo->dsi[0]); - if (ret < 0) - dev_err(&dsi->dev, "failed to detach from DSI0 host: %d\n", ret); - - if (pinfo->desc->is_dual_dsi) { - ret = mipi_dsi_detach(pinfo->dsi[1]); - if (ret < 0) - dev_err(&pinfo->dsi[1]->dev, "failed to detach from DSI1 host: %d\n", ret); - mipi_dsi_device_unregister(pinfo->dsi[1]); - } drm_panel_remove(&pinfo->panel); } @@ -1251,7 +1239,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi) if (!dsi1_host) return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n"); - pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info); + pinfo->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi1_host, info); if 
(IS_ERR(pinfo->dsi[1])) { dev_err(dev, "cannot get secondary DSI device\n"); return PTR_ERR(pinfo->dsi[1]); @@ -1288,7 +1276,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi) pinfo->dsi[i]->format = pinfo->desc->format; pinfo->dsi[i]->mode_flags = pinfo->desc->mode_flags; - ret = mipi_dsi_attach(pinfo->dsi[i]); + ret = devm_mipi_dsi_attach(dev, pinfo->dsi[i]); if (ret < 0) return dev_err_probe(dev, ret, "cannot attach to DSI%d host.\n", i); } diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c index 4dca6802faef..d3071c01aaea 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c @@ -46,108 +46,73 @@ static void rm69380_reset(struct rm69380_panel *ctx) static int rm69380_on(struct rm69380_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi[0]; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; if (ctx->dsi[1]) ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd4); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd0); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x26); - mipi_dsi_dcs_write_seq(dsi, 0x75, 0x3f); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28); - mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x08); - - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(20); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(36); - - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x3f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 36); + + return dsi_ctx.accum_err; } -static int rm69380_off(struct rm69380_panel *ctx) +static void rm69380_off(struct rm69380_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi[0]; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; if (ctx->dsi[1]) ctx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(35); - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(20); - - return 0; + 
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 35); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); } static int rm69380_prepare(struct drm_panel *panel) { struct rm69380_panel *ctx = to_rm69380_panel(panel); - struct device *dev = &ctx->dsi[0]->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } rm69380_reset(ctx); ret = rm69380_on(ctx); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - return ret; } - return 0; + return ret; } static int rm69380_unprepare(struct drm_panel *panel) { struct rm69380_panel *ctx = to_rm69380_panel(panel); - struct device *dev = &ctx->dsi[0]->dev; - int ret; - ret = rm69380_off(ctx); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + rm69380_off(ctx); gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c new file mode 100644 index 000000000000..cf6186312252 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_ACCESS_PROT_OFF 0xb0 +#define MCS_PASSWD 0xf0 + +struct ams581vf01 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data ams581vf01_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct ams581vf01 *to_ams581vf01(struct drm_panel *panel) +{ + return container_of(panel, struct ams581vf01, panel); +} + +static void ams581vf01_reset(struct ams581vf01 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int ams581vf01_on(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Sleep Out, Wait 10ms */ + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + usleep_range(10000, 11000); + + /* TE On */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + /* MIC Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xeb, 0x17, + 0x41, 0x92, + 0x0e, 0x10, + 0x82, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + /* Column & Page Address Setting */ + mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437); + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + /* Brightness Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); 
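[Editor's note] This new driver, like the hx83112a, ltk050h3146w, rm69380 and s6e3fa7 conversions earlier in the diff, is written against struct mipi_dsi_multi_context: each *_multi helper becomes a no-op once ctx->accum_err is non-zero, so a long init sequence can run without per-call error checks and the caller returns the accumulated error once at the end. The helpers also log failures themselves, which is why the per-call dev_err() strings disappear in the conversions above. A simplified sketch of the helper behavior (not the kernel implementation, which lives behind <drm/drm_mipi_dsi.h>; example_exit_sleep_mode_multi is a hypothetical name):

	static void example_exit_sleep_mode_multi(struct mipi_dsi_multi_context *ctx)
	{
		int ret;

		if (ctx->accum_err)		/* an earlier step failed: skip */
			return;

		ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
		if (ret < 0) {
			ctx->accum_err = ret;	/* latch the first error */
			dev_err(&ctx->dsi->dev,
				"failed to exit sleep mode: %d\n", ret);
		}
	}

Callers build the context on the stack ({ .dsi = dsi }), chain the calls, and finish with "return dsi_ctx.accum_err;"; mipi_dsi_msleep() likewise takes the context rather than a bare delay, so a sequence that has already failed need not sleep.
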
+ + /* Horizontal & Vertical sync Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x11, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + mipi_dsi_msleep(&dsi_ctx, 110); + + /* Display On */ + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void ams581vf01_off(struct ams581vf01 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Display Off & Sleep In */ + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + /* VCI operating mode change */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */ + + mipi_dsi_msleep(&dsi_ctx, 120); +} + +static int ams581vf01_prepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + if (ret < 0) + return ret; + + ams581vf01_reset(ctx); + + ret = ams581vf01_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int ams581vf01_unprepare(struct drm_panel *panel) +{ + struct ams581vf01 *ctx = to_ams581vf01(panel); + + ams581vf01_off(ctx); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams581vf01_mode = { + .clock = (1080 + 32 + 73 + 98) * (2340 + 8 + 1 + 8) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 32, + .hsync_end = 1080 + 32 + 73, + .htotal = 1080 + 32 + 73 + 98, + .vdisplay = 2340, + .vsync_start = 2340 + 8, + .vsync_end = 2340 + 8 + 1, + .vtotal = 2340 + 8 + 1 + 8, + .width_mm = 62, + .height_mm = 134, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int ams581vf01_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams581vf01_mode); +} + +static const struct drm_panel_funcs ams581vf01_panel_funcs = { + .prepare = ams581vf01_prepare, + .unprepare = ams581vf01_unprepare, + .get_modes = ams581vf01_get_modes, +}; + +static int ams581vf01_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops ams581vf01_bl_ops = { + .update_status = ams581vf01_bl_update_status, +}; + +static struct backlight_device * +ams581vf01_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 511, + .max_brightness = 1023, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &ams581vf01_bl_ops, &props); +} + +static int ams581vf01_probe(struct 
mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ams581vf01 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(&dsi->dev, + ARRAY_SIZE(ams581vf01_supplies), + ams581vf01_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = ams581vf01_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void ams581vf01_remove(struct mipi_dsi_device *dsi) +{ + struct ams581vf01 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id ams581vf01_of_match[] = { + { .compatible = "samsung,ams581vf01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ams581vf01_of_match); + +static struct mipi_dsi_driver ams581vf01_driver = { + .probe = ams581vf01_probe, + .remove = ams581vf01_remove, + .driver = { + .name = "panel-samsung-ams581vf01", + .of_match_table = ams581vf01_of_match, + }, +}; +module_mipi_dsi_driver(ams581vf01_driver); + +MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>"); +MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS581VF01 cmd mode dsi panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c new file mode 100644 index 000000000000..817365cb5e46 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_ACCESS_PROT_OFF 0xb0 +#define MCS_UNKNOWN_B7 0xb7 +#define MCS_BIAS_CURRENT_CTRL 0xd1 +#define MCS_PASSWD1 0xf0 +#define MCS_PASSWD2 0xfc +#define MCS_UNKNOWN_FF 0xff + +struct ams639rq08 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data ams639rq08_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct ams639rq08 *to_ams639rq08(struct drm_panel *panel) +{ + return container_of(panel, struct ams639rq08, panel); +} + +static void ams639rq08_reset(struct ams639rq08 *ctx) +{ + 
gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int ams639rq08_on(struct ams639rq08 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Delay 2ms for VCI1 power */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_FF, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); + + /* Sleep Out */ + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + usleep_range(10000, 11000); + + /* TE OUT (Vsync On) */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + /* DBV Smooth Transition */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x01, 0x4b); + + /* Edge Dimming Speed Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + + /* Page Address Set */ + mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); + + /* Set DDIC internal HFP */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x23); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x11); + + /* OFC Setting 84.1 Mhz */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe9, 0x11, 0x55, + 0xa6, 0x75, 0xa3, + 0xb9, 0xa1, 0x4a, + 0x00, 0x1a, 0xb8); + + /* Err_FG Setting */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, + 0x00, 0x00, 0x02, + 0x02, 0x42, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, + 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x19); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + + /* Brightness Control */ + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000); + + /* Display On */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_msleep(&dsi_ctx, 67); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void ams639rq08_off(struct ams639rq08 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); +} + +static int ams639rq08_prepare(struct drm_panel *panel) +{ + struct ams639rq08 *ctx = to_ams639rq08(panel); + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + if (ret < 0) + return ret; + + ams639rq08_reset(ctx); + + ret = ams639rq08_on(ctx); + if (ret < 0) { + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + 
regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int ams639rq08_unprepare(struct drm_panel *panel) +{ + struct ams639rq08 *ctx = to_ams639rq08(panel); + + ams639rq08_off(ctx); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams639rq08_mode = { + .clock = (1080 + 64 + 20 + 64) * (2340 + 64 + 20 + 64) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 64, + .hsync_end = 1080 + 64 + 20, + .htotal = 1080 + 64 + 20 + 64, + .vdisplay = 2340, + .vsync_start = 2340 + 64, + .vsync_end = 2340 + 64 + 20, + .vtotal = 2340 + 64 + 20 + 64, + .width_mm = 68, + .height_mm = 147, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int ams639rq08_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &ams639rq08_mode); +} + +static const struct drm_panel_funcs ams639rq08_panel_funcs = { + .prepare = ams639rq08_prepare, + .unprepare = ams639rq08_unprepare, + .get_modes = ams639rq08_get_modes, +}; + +static int ams639rq08_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int ams639rq08_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return brightness; +} + +static const struct backlight_ops ams639rq08_bl_ops = { + .update_status = ams639rq08_bl_update_status, + .get_brightness = ams639rq08_bl_get_brightness, +}; + +static struct backlight_device * +ams639rq08_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 1023, + .max_brightness = 2047, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &ams639rq08_bl_ops, &props); +} + +static int ams639rq08_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct ams639rq08 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(&dsi->dev, + ARRAY_SIZE(ams639rq08_supplies), + ams639rq08_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = ams639rq08_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + 
"Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void ams639rq08_remove(struct mipi_dsi_device *dsi) +{ + struct ams639rq08 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id ams639rq08_of_match[] = { + { .compatible = "samsung,ams639rq08" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ams639rq08_of_match); + +static struct mipi_dsi_driver ams639rq08_driver = { + .probe = ams639rq08_probe, + .remove = ams639rq08_remove, + .driver = { + .name = "panel-samsung-ams639rq08", + .of_match_table = ams639rq08_of_match, + }, +}; +module_mipi_dsi_driver(ams639rq08_driver); + +MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>"); +MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS639RQ08 cmd mode dsi panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c index 10bc8fb5f1f9..27a059b55ae5 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c @@ -38,57 +38,38 @@ static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx) usleep_range(10000, 11000); } -static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx) +static int s6e3fa7_panel_on(struct mipi_dsi_device *dsi) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, + 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, + 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0xf4, - 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, - 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - - return 0; + return dsi_ctx.accum_err; } static int s6e3fa7_panel_prepare(struct drm_panel *panel) { struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; s6e3fa7_panel_reset(ctx); - ret = s6e3fa7_panel_on(ctx); - if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); + ret = s6e3fa7_panel_on(ctx->dsi); + if (ret < 0) gpiod_set_value_cansleep(ctx->reset_gpio, 1); - return ret; - } - return 0; + return ret; } static int s6e3fa7_panel_unprepare(struct drm_panel *panel) @@ -104,23 +85,13 @@ static int s6e3fa7_panel_disable(struct drm_panel *panel) { struct s6e3fa7_panel *ctx = 
to_s6e3fa7_panel(panel); struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; - - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - return 0; + return dsi_ctx.accum_err; } static const struct drm_display_mode s6e3fa7_panel_mode = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c new file mode 100644 index 000000000000..64c6f7d45bed --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: +// Copyright (c) 2013, The Linux Foundation. All rights reserved. +// Copyright (c) 2024 Dzmitry Sankouski <dsankouski@gmail.com> + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_dsc.h> +#include <drm/display/drm_dsc_helper.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_panel.h> + +struct s6e3ha8 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct drm_dsc_config dsc; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data s6e3ha8_supplies[] = { + { .supply = "vdd3" }, + { .supply = "vci" }, + { .supply = "vddr" }, +}; + +static inline +struct s6e3ha8 *to_s6e3ha8_amb577px01_wqhd(struct drm_panel *panel) +{ + return container_of(panel, struct s6e3ha8, panel); +} + +#define s6e3ha8_test_key_on_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) +#define s6e3ha8_test_key_off_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) +#define s6e3ha8_test_key_on_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0x5a, 0x5a) +#define s6e3ha8_test_key_off_lvl3(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0xa5, 0xa5) +#define s6e3ha8_test_key_on_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0xa5, 0xa5) +#define s6e3ha8_test_key_off_lvl1(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0x5a, 0x5a) +#define s6e3ha8_afc_off(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xe2, 0x00, 0x00) + +static void s6e3ha8_amb577px01_wqhd_reset(struct s6e3ha8 *priv) +{ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); +} + +static int s6e3ha8_amb577px01_wqhd_on(struct s6e3ha8 *priv) +{ + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + s6e3ha8_test_key_on_lvl1(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_compression_mode_multi(&ctx, true); + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); + usleep_range(5000, 6000); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13); + s6e3ha8_test_key_off_lvl2(&ctx); + usleep_range(10000, 11000); + + 
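/*
 * Note: the *_multi() helpers used throughout this sequence latch the
 * first failure in ctx.accum_err and become no-ops once it is set,
 * which is why no per-command error check is needed here; callers only
 * inspect accum_err after the whole sequence has been issued.
 */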
s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13); + s6e3ha8_test_key_off_lvl2(&ctx); + + /* OMOK setting 1 (Initial setting) - Scaler Latch Setting Guide */ + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x07); + /* latch setting 1 : Scaler on/off & address setting & PPS setting -> Image update latch */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x3c, 0x10); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x0b); + /* latch setting 2 : Ratio change mode -> Image update latch */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x30); + /* OMOK setting 2 - Seamless setting guide : WQHD */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00, 0x00, 0x05, 0x9f); /* CASET */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00, 0x00, 0x0b, 0x8f); /* PASET */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x01); /* scaler setup : scaler off */ + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00); /* TE Vsync ON */ + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xed, 0x4c); /* ERR_FG */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl3(&ctx); + /* FFC Setting 897.6Mbps */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x0d, 0x10, 0xb4, 0x3e, 0x01); + s6e3ha8_test_key_off_lvl3(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, + 0x00, 0xb0, 0x81, 0x09, 0x00, 0x00, 0x00, + 0x11, 0x03); /* TSP HSYNC Setting */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x03); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf6, 0x43); + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_on_lvl2(&ctx); + /* Brightness condition set */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, + 0x07, 0x00, 0x00, 0x00, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x0c); /* AID Set : 0% */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, + 0x19, 0xdc, 0x16, 0x01, 0x34, 0x67, 0x9a, + 0xcd, 0x01, 0x22, 0x33, 0x44, 0x00, 0x00, + 0x05, 0x55, 0xcc, 0x0c, 0x01, 0x11, 0x11, + 0x10); /* MPS/ELVSS Setting */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf4, 0xeb, 0x28); /* VINT */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf7, 0x03); /* Gamma, LTPS(AID) update */ + s6e3ha8_test_key_off_lvl2(&ctx); + + s6e3ha8_test_key_off_lvl1(&ctx); + + return ctx.accum_err; +} + +static int s6e3ha8_enable(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_dcs_set_display_on_multi(&ctx); + s6e3ha8_test_key_off_lvl1(&ctx); + + return ctx.accum_err; +} + +static int s6e3ha8_disable(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_dcs_set_display_off_multi(&ctx); + s6e3ha8_test_key_off_lvl1(&ctx); + mipi_dsi_msleep(&ctx, 20); + + s6e3ha8_test_key_on_lvl2(&ctx); + s6e3ha8_afc_off(&ctx); + s6e3ha8_test_key_off_lvl2(&ctx); + + mipi_dsi_msleep(&ctx, 160); + + return ctx.accum_err; +} + +static int s6e3ha8_amb577px01_wqhd_prepare(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + struct 
mipi_dsi_device *dsi = priv->dsi; + struct mipi_dsi_multi_context ctx = { .dsi = dsi }; + struct drm_dsc_picture_parameter_set pps; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); + if (ret < 0) + return ret; + mipi_dsi_msleep(&ctx, 120); + s6e3ha8_amb577px01_wqhd_reset(priv); + + ret = s6e3ha8_amb577px01_wqhd_on(priv); + if (ret < 0) { + gpiod_set_value_cansleep(priv->reset_gpio, 1); + goto err; + } + + drm_dsc_pps_payload_pack(&pps, &priv->dsc); + + s6e3ha8_test_key_on_lvl1(&ctx); + mipi_dsi_picture_parameter_set_multi(&ctx, &pps); + s6e3ha8_test_key_off_lvl1(&ctx); + + mipi_dsi_msleep(&ctx, 28); + + return ctx.accum_err; +err: + regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); + return ret; +} + +static int s6e3ha8_amb577px01_wqhd_unprepare(struct drm_panel *panel) +{ + struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel); + + return regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies); +} + +static const struct drm_display_mode s6e3ha8_amb577px01_wqhd_mode = { + .clock = (1440 + 116 + 44 + 120) * (2960 + 120 + 80 + 124) * 60 / 1000, + .hdisplay = 1440, + .hsync_start = 1440 + 116, + .hsync_end = 1440 + 116 + 44, + .htotal = 1440 + 116 + 44 + 120, + .vdisplay = 2960, + .vsync_start = 2960 + 120, + .vsync_end = 2960 + 120 + 80, + .vtotal = 2960 + 120 + 80 + 124, + .width_mm = 64, + .height_mm = 132, +}; + +static int s6e3ha8_amb577px01_wqhd_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &s6e3ha8_amb577px01_wqhd_mode); +} + +static const struct drm_panel_funcs s6e3ha8_amb577px01_wqhd_panel_funcs = { + .prepare = s6e3ha8_amb577px01_wqhd_prepare, + .unprepare = s6e3ha8_amb577px01_wqhd_unprepare, + .get_modes = s6e3ha8_amb577px01_wqhd_get_modes, + .enable = s6e3ha8_enable, + .disable = s6e3ha8_disable, +}; + +static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e3ha8 *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies), + s6e3ha8_supplies, + &priv->supplies); + if (ret < 0) { + dev_err(dev, "failed to get regulators: %d\n", ret); + return ret; + } + + priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset-gpios\n"); + + priv->dsi = dsi; + mipi_dsi_set_drvdata(dsi, priv); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP | + MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET; + + drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + priv->panel.prepare_prev_first = true; + + drm_panel_add(&priv->panel); + + /* This panel only supports DSC; unconditionally enable it */ + dsi->dsc = &priv->dsc; + + priv->dsc.dsc_version_major = 1; + priv->dsc.dsc_version_minor = 1; + + priv->dsc.slice_height = 40; + priv->dsc.slice_width = 720; + WARN_ON(1440 % priv->dsc.slice_width); + priv->dsc.slice_count = 1440 / priv->dsc.slice_width; + priv->dsc.bits_per_component = 8; + priv->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */ + priv->dsc.block_pred_enable = true; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", 
ret); + drm_panel_remove(&priv->panel); + return ret; + } + + return 0; +} + +static void s6e3ha8_amb577px01_wqhd_remove(struct mipi_dsi_device *dsi) +{ + struct s6e3ha8 *priv = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&priv->panel); +} + +static const struct of_device_id s6e3ha8_amb577px01_wqhd_of_match[] = { + { .compatible = "samsung,s6e3ha8" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e3ha8_amb577px01_wqhd_of_match); + +static struct mipi_dsi_driver s6e3ha8_amb577px01_wqhd_driver = { + .probe = s6e3ha8_amb577px01_wqhd_probe, + .remove = s6e3ha8_amb577px01_wqhd_remove, + .driver = { + .name = "panel-s6e3ha8", + .of_match_table = s6e3ha8_amb577px01_wqhd_of_match, + }, +}; +module_mipi_dsi_driver(s6e3ha8_amb577px01_wqhd_driver); + +MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for S6E3HA8 panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c new file mode 100644 index 000000000000..e92e95158d1f --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Samsung AMS427AP24 panel with S6E88A0 controller + * Copyright (c) 2024 Jakob Hauser <jahau@rocketmail.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#define NUM_STEPS_CANDELA 54 +#define NUM_STEPS_AID 39 +#define NUM_STEPS_ELVSS 17 + +/* length of the payload data, thereof fixed and variable */ +#define FIX_LEN_AID 4 +#define FIX_LEN_ELVSS 2 +#define FIX_LEN_GAMMA 1 +#define VAR_LEN_AID 2 +#define VAR_LEN_ELVSS 1 +#define VAR_LEN_GAMMA 33 +#define LEN_AID (FIX_LEN_AID + VAR_LEN_AID) +#define LEN_ELVSS (FIX_LEN_ELVSS + VAR_LEN_ELVSS) +#define LEN_GAMMA (FIX_LEN_GAMMA + VAR_LEN_GAMMA) + +struct s6e88a0_ams427ap24 { + struct drm_panel panel; + struct backlight_device *bl_dev; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; + bool flip_horizontal; +}; + +static const struct regulator_bulk_data s6e88a0_ams427ap24_supplies[] = { + { .supply = "vdd3" }, + { .supply = "vci" }, +}; + +static inline +struct s6e88a0_ams427ap24 *to_s6e88a0_ams427ap24(struct drm_panel *panel) +{ + return container_of(panel, struct s6e88a0_ams427ap24, panel); +} + +enum candela { + CANDELA_10CD, /* 0 */ + CANDELA_11CD, + CANDELA_12CD, + CANDELA_13CD, + CANDELA_14CD, + CANDELA_15CD, + CANDELA_16CD, + CANDELA_17CD, + CANDELA_19CD, + CANDELA_20CD, + CANDELA_21CD, + CANDELA_22CD, + CANDELA_24CD, + CANDELA_25CD, + CANDELA_27CD, + CANDELA_29CD, + CANDELA_30CD, + CANDELA_32CD, + CANDELA_34CD, + CANDELA_37CD, + CANDELA_39CD, + CANDELA_41CD, + CANDELA_44CD, + CANDELA_47CD, + CANDELA_50CD, + CANDELA_53CD, + CANDELA_56CD, + CANDELA_60CD, + CANDELA_64CD, + CANDELA_68CD, + CANDELA_72CD, + CANDELA_77CD, + CANDELA_82CD, + CANDELA_87CD, + CANDELA_93CD, + CANDELA_98CD, + CANDELA_105CD, + CANDELA_111CD, + CANDELA_119CD, + CANDELA_126CD, + CANDELA_134CD, + CANDELA_143CD, + CANDELA_152CD, + CANDELA_162CD, + CANDELA_172CD, + CANDELA_183CD, + CANDELA_195CD, + CANDELA_207CD, + 
CANDELA_220CD, + CANDELA_234CD, + CANDELA_249CD, + CANDELA_265CD, + CANDELA_282CD, + CANDELA_300CD, /* 53 */ +}; + +static const int s6e88a0_ams427ap24_br_to_cd[NUM_STEPS_CANDELA] = { + /* columns: brightness from, brightness till, candela */ + /* 0 */ 10, /* 10CD */ + /* 11 */ 11, /* 11CD */ + /* 12 */ 12, /* 12CD */ + /* 13 */ 13, /* 13CD */ + /* 14 */ 14, /* 14CD */ + /* 15 */ 15, /* 15CD */ + /* 16 */ 16, /* 16CD */ + /* 17 */ 17, /* 17CD */ + /* 18 */ 18, /* 19CD */ + /* 19 */ 19, /* 20CD */ + /* 20 */ 20, /* 21CD */ + /* 21 */ 21, /* 22CD */ + /* 22 */ 22, /* 24CD */ + /* 23 */ 23, /* 25CD */ + /* 24 */ 24, /* 27CD */ + /* 25 */ 25, /* 29CD */ + /* 26 */ 26, /* 30CD */ + /* 27 */ 27, /* 32CD */ + /* 28 */ 28, /* 34CD */ + /* 29 */ 29, /* 37CD */ + /* 30 */ 30, /* 39CD */ + /* 31 */ 32, /* 41CD */ + /* 33 */ 34, /* 44CD */ + /* 35 */ 36, /* 47CD */ + /* 37 */ 38, /* 50CD */ + /* 39 */ 40, /* 53CD */ + /* 41 */ 43, /* 56CD */ + /* 44 */ 46, /* 60CD */ + /* 47 */ 49, /* 64CD */ + /* 50 */ 52, /* 68CD */ + /* 53 */ 56, /* 72CD */ + /* 57 */ 59, /* 77CD */ + /* 60 */ 63, /* 82CD */ + /* 64 */ 67, /* 87CD */ + /* 68 */ 71, /* 93CD */ + /* 72 */ 76, /* 98CD */ + /* 77 */ 80, /* 105CD */ + /* 81 */ 86, /* 111CD */ + /* 87 */ 91, /* 119CD */ + /* 92 */ 97, /* 126CD */ + /* 98 */ 104, /* 134CD */ + /* 105 */ 110, /* 143CD */ + /* 111 */ 118, /* 152CD */ + /* 119 */ 125, /* 162CD */ + /* 126 */ 133, /* 172CD */ + /* 134 */ 142, /* 183CD */ + /* 143 */ 150, /* 195CD */ + /* 151 */ 160, /* 207CD */ + /* 161 */ 170, /* 220CD */ + /* 171 */ 181, /* 234CD */ + /* 182 */ 205, /* 249CD */ + /* 206 */ 234, /* 265CD */ + /* 235 */ 254, /* 282CD */ + /* 255 */ 255, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_aid[NUM_STEPS_AID][VAR_LEN_AID] = { + { 0x03, 0x77 }, /* AOR 90.9%, 10CD */ + { 0x03, 0x73 }, /* AOR 90.5%, 11CD */ + { 0x03, 0x69 }, /* AOR 89.4%, 12CD */ + { 0x03, 0x65 }, /* AOR 89.0%, 13CD */ + { 0x03, 0x61 }, /* AOR 88.6%, 14CD */ + { 0x03, 0x55 }, /* AOR 87.4%, 15CD */ + { 0x03, 0x50 }, /* AOR 86.9%, 16CD */ + { 0x03, 0x45 }, /* AOR 85.8%, 17CD */ + { 0x03, 0x35 }, /* AOR 84.1%, 19CD */ + { 0x03, 0x27 }, /* AOR 82.7%, 20CD */ + { 0x03, 0x23 }, /* AOR 82.3%, 21CD */ + { 0x03, 0x17 }, /* AOR 81.0%, 22CD */ + { 0x03, 0x11 }, /* AOR 80.4%, 24CD */ + { 0x03, 0x04 }, /* AOR 79.1%, 25CD */ + { 0x02, 0xf4 }, /* AOR 77.5%, 27CD */ + { 0x02, 0xe3 }, /* AOR 75.7%, 29CD */ + { 0x02, 0xd7 }, /* AOR 74.5%, 30CD */ + { 0x02, 0xc6 }, /* AOR 72.7%, 32CD */ + { 0x02, 0xb7 }, /* AOR 71.2%, 34CD */ + { 0x02, 0xa1 }, /* AOR 69.0%, 37CD */ + { 0x02, 0x91 }, /* AOR 67.3%, 39CD */ + { 0x02, 0x78 }, /* AOR 64.8%, 41CD */ + { 0x02, 0x62 }, /* AOR 62.5%, 44CD */ + { 0x02, 0x45 }, /* AOR 59.5%, 47CD */ + { 0x02, 0x30 }, /* AOR 57.4%, 50CD */ + { 0x02, 0x13 }, /* AOR 54.4%, 53CD */ + { 0x01, 0xf5 }, /* AOR 51.3%, 56CD */ + { 0x01, 0xd3 }, /* AOR 47.8%, 60CD */ + { 0x01, 0xb1 }, /* AOR 44.4%, 64CD */ + { 0x01, 0x87 }, /* AOR 40.1%, 68CD */ + { 0x01, 0x63 }, /* AOR 36.6%, 72CD */ + { 0x01, 0x35 }, /* AOR 31.7%, 77CD */ + { 0x01, 0x05 }, /* AOR 26.9%, 82CD */ + { 0x00, 0xd5 }, /* AOR 21.8%, 87CD */ + { 0x00, 0xa1 }, /* AOR 16.5%, 93CD */ + { 0x00, 0x6f }, /* AOR 11.4%, 98CD */ + { 0x00, 0x31 }, /* AOR 5.0%, 105CD */ + { 0x01, 0x86 }, /* AOR 40.0%, 111CD ~ 172CD */ + { 0x00, 0x08 }, /* AOR 0.6%, 183CD ~ 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_elvss[NUM_STEPS_ELVSS][VAR_LEN_ELVSS] = { + { 0x14 }, /* 10CD ~ 111CD */ + { 0x13 }, /* 119CD */ + { 0x12 }, /* 126CD */ + { 0x12 }, /* 134CD */ + { 0x11 }, /* 143CD 
*/ + { 0x10 }, /* 152CD */ + { 0x0f }, /* 162CD */ + { 0x0e }, /* 172CD */ + { 0x11 }, /* 183CD */ + { 0x11 }, /* 195CD */ + { 0x10 }, /* 207CD */ + { 0x0f }, /* 220CD */ + { 0x0f }, /* 234CD */ + { 0x0e }, /* 249CD */ + { 0x0d }, /* 265CD */ + { 0x0c }, /* 282CD */ + { 0x0b }, /* 300CD */ +}; + +static const u8 s6e88a0_ams427ap24_gamma[NUM_STEPS_CANDELA][VAR_LEN_GAMMA] = { + /* 10CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 11CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b, + 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b, + 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 12CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 13CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a, + 0x72, 0x8b, 0x61, 0x69, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 14CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x88, 0x89, 0x8a, 0x87, 0x86, 0x8a, 0x82, 0x82, 0x87, 0x79, + 0x71, 0x89, 0x63, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 15CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8c, + 0x8c, 0x86, 0x87, 0x88, 0x85, 0x85, 0x8a, 0x83, 0x83, 0x88, 0x78, + 0x72, 0x89, 0x64, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 16CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x88, 0x86, 0x86, 0x8a, 0x84, 0x84, 0x88, 0x78, + 0x72, 0x89, 0x5d, 0x67, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 17CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x8a, 0x84, 0x83, 0x87, 0x78, + 0x73, 0x89, 0x64, 0x6e, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 19CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x89, 0x84, 0x84, 0x87, 0x77, + 0x72, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 20CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x73, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 21CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79, + 0x74, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 22CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x75, 0x87, 0x65, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 24CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b, + 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c, + 0x76, 0x86, 0x66, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + /* 25CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 27CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, 
+ 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f, + 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 29CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 30CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 32CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80, + 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 34CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 37CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x88, 0x87, 0x86, 0x87, 0x83, 0x84, 0x84, 0x7f, + 0x79, 0x86, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 39CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x83, 0x85, 0x85, 0x80, + 0x79, 0x85, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 41CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x93, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 44CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x84, 0x6e, 0x79, 0x92, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 47CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x81, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 50CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x82, 0x84, 0x83, 0x7f, + 0x79, 0x83, 0x6f, 0x79, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 53CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b, + 0x8b, 0x86, 0x88, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x85, 0x7f, + 0x79, 0x83, 0x70, 0x79, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 56CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7f, + 0x79, 0x82, 0x70, 0x7a, 0x8e, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 60CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7e, + 0x79, 0x82, 0x71, 0x7a, 0x8d, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 64CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x83, 0x82, 0x80, + 0x7a, 0x84, 0x71, 0x7a, 0x8c, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 68CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7b, 0x8b, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 72CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x86, 
0x88, 0x86, 0x85, 0x85, 0x86, 0x82, 0x84, 0x82, 0x81, + 0x7b, 0x83, 0x72, 0x7c, 0x8a, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 77CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x72, 0x7c, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 82CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81, + 0x7c, 0x82, 0x73, 0x7c, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 87CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a, + 0x8a, 0x85, 0x87, 0x85, 0x84, 0x84, 0x86, 0x80, 0x84, 0x81, 0x80, + 0x7a, 0x82, 0x76, 0x7f, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 93CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x84, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 98CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a, + 0x8a, 0x86, 0x87, 0x85, 0x85, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80, + 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 }, + /* 105CD */ + { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x89, 0x88, 0x88, 0x8b, 0x8a, + 0x8a, 0x84, 0x87, 0x85, 0x85, 0x85, 0x85, 0x80, 0x84, 0x80, 0x7f, + 0x79, 0x81, 0x71, 0x7d, 0x87, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 }, + /* 111CD */ + { 0x00, 0xdf, 0x00, 0xde, 0x00, 0xde, 0x85, 0x85, 0x84, 0x87, 0x86, + 0x87, 0x85, 0x86, 0x85, 0x83, 0x83, 0x83, 0x81, 0x82, 0x82, 0x80, + 0x7d, 0x82, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 119CD */ + { 0x00, 0xe3, 0x00, 0xe1, 0x00, 0xe2, 0x85, 0x85, 0x84, 0x86, 0x85, + 0x85, 0x84, 0x85, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x7e, + 0x7b, 0x81, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 126CD */ + { 0x00, 0xe6, 0x00, 0xe5, 0x00, 0xe5, 0x85, 0x84, 0x84, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x84, 0x82, 0x83, 0x83, 0x80, 0x81, 0x81, 0x80, + 0x7f, 0x83, 0x73, 0x7c, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 134CD */ + { 0x00, 0xe9, 0x00, 0xe8, 0x00, 0xe8, 0x84, 0x84, 0x83, 0x85, 0x85, + 0x85, 0x84, 0x84, 0x83, 0x81, 0x82, 0x82, 0x81, 0x81, 0x81, 0x7f, + 0x7d, 0x81, 0x73, 0x7c, 0x83, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 143CD */ + { 0x00, 0xed, 0x00, 0xec, 0x00, 0xec, 0x84, 0x83, 0x83, 0x84, 0x84, + 0x84, 0x84, 0x84, 0x83, 0x82, 0x83, 0x83, 0x81, 0x80, 0x81, 0x7f, + 0x7e, 0x81, 0x70, 0x79, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 152CD */ + { 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x84, 0x84, 0x83, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x80, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 162CD */ + { 0x00, 0xf4, 0x00, 0xf3, 0x00, 0xf4, 0x83, 0x83, 0x83, 0x83, 0x83, + 0x83, 0x82, 0x81, 0x81, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80, + 0x7f, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 172CD */ + { 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8, 0x82, 0x82, 0x82, 0x82, 0x82, + 0x82, 0x82, 0x81, 0x81, 0x80, 0x81, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x80, 0x83, 0x6d, 0x76, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 183CD */ + { 0x00, 0xe0, 0x00, 0xdf, 0x00, 0xdf, 0x84, 0x84, 0x83, 0x86, 0x86, + 0x86, 0x83, 0x84, 0x83, 0x82, 0x82, 0x82, 0x81, 0x83, 0x81, 0x81, + 0x7e, 0x81, 0x80, 0x82, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 195CD */ + { 0x00, 0xe4, 0x00, 0xe3, 0x00, 0xe3, 0x84, 0x83, 0x83, 0x85, 0x85, + 0x85, 0x83, 0x84, 
0x83, 0x81, 0x82, 0x82, 0x82, 0x83, 0x81, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 207CD */ + { 0x00, 0xe7, 0x00, 0xe6, 0x00, 0xe6, 0x83, 0x82, 0x82, 0x85, 0x85, + 0x85, 0x82, 0x83, 0x83, 0x82, 0x82, 0x82, 0x80, 0x81, 0x80, 0x81, + 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 220CD */ + { 0x00, 0xeb, 0x00, 0xea, 0x00, 0xea, 0x83, 0x83, 0x82, 0x84, 0x84, + 0x84, 0x82, 0x83, 0x82, 0x81, 0x81, 0x82, 0x81, 0x82, 0x81, 0x80, + 0x7e, 0x80, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 234CD */ + { 0x00, 0xef, 0x00, 0xee, 0x00, 0xee, 0x83, 0x82, 0x82, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x81, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 249CD */ + { 0x00, 0xf3, 0x00, 0xf2, 0x00, 0xf2, 0x82, 0x81, 0x81, 0x83, 0x83, + 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 265CD */ + { 0x00, 0xf7, 0x00, 0xf7, 0x00, 0xf7, 0x81, 0x81, 0x80, 0x82, 0x82, + 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x80, 0x7f, + 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 282CD */ + { 0x00, 0xfb, 0x00, 0xfb, 0x00, 0xfb, 0x80, 0x80, 0x80, 0x81, 0x81, + 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x78, 0x79, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 }, + /* 300CD */ + { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00 }, +}; + +static int s6e88a0_ams427ap24_set_brightness(struct backlight_device *bd) +{ + struct s6e88a0_ams427ap24 *ctx = bl_get_data(bd); + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int brightness = bd->props.brightness; + int candela_enum; + u8 b2[LEN_AID] = { 0xb2, 0x40, 0x08, 0x20, 0x00, 0x00 }; + u8 b6[LEN_ELVSS] = { 0xb6, 0x28, 0x00 }; + u8 ca[LEN_GAMMA]; + + /* get candela enum from brightness */ + for (candela_enum = 0; candela_enum < NUM_STEPS_CANDELA; candela_enum++) + if (brightness <= s6e88a0_ams427ap24_br_to_cd[candela_enum]) + break; + + /* get aid */ + switch (candela_enum) { + case CANDELA_10CD ... CANDELA_105CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[candela_enum], + VAR_LEN_AID); + break; + case CANDELA_111CD ... CANDELA_172CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD], + VAR_LEN_AID); + break; + case CANDELA_183CD ... 
CANDELA_300CD: + memcpy(&b2[FIX_LEN_AID], + s6e88a0_ams427ap24_aid[CANDELA_111CD + 1], + VAR_LEN_AID); + break; + default: + dev_err(dev, "Failed to get aid data\n"); + return -EINVAL; + } + + /* get elvss */ + if (candela_enum <= CANDELA_111CD) { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[0], + VAR_LEN_ELVSS); + } else { + memcpy(&b6[FIX_LEN_ELVSS], + s6e88a0_ams427ap24_elvss[candela_enum - CANDELA_111CD], + VAR_LEN_ELVSS); + } + + /* get gamma */ + ca[0] = 0xca; + memcpy(&ca[FIX_LEN_GAMMA], + s6e88a0_ams427ap24_gamma[candela_enum], + VAR_LEN_GAMMA); + + /* write data */ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b2, ARRAY_SIZE(b2)); // set aid + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x00); // acl off + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b6, ARRAY_SIZE(b6)); // set elvss + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, ca, ARRAY_SIZE(ca)); // set gamma + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma update + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + + return dsi_ctx.accum_err; +} + +static void s6e88a0_ams427ap24_reset(struct s6e88a0_ams427ap24 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(18000, 19000); +} + +static int s6e88a0_ams427ap24_on(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x5a, 0x5a); // level 2 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x11); // src latch set global 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x11); // src latch set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x13); // src latch set global 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x18); // src latch set 2 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02); // avdd set 1 + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x30); // avdd set 2 + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x5a, 0x5a); // level 3 key on + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // pixel clock divider pol. 
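/*
 * The 0xf0/0xfc/0xf1 writes bracketing these blocks are the vendor
 * "test key" commands seen across these Samsung panels: a 0x5a, 0x5a
 * payload unlocks a command level and 0xa5, 0xa5 locks it again, so
 * each block of tuning commands sits inside an unlock/lock pair.
 */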
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x03, 0x0d); // unknown + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0xa5, 0xa5); // level 3 key off + + if (ctx->flip_horizontal) + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x0e); // flip display + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0xa5, 0xa5); // level 2 key off + + ret = s6e88a0_ams427ap24_set_brightness(ctx->bl_dev); + if (ret < 0) { + dev_err(dev, "Failed to set brightness: %d\n", ret); + return ret; + } + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_off(struct s6e88a0_ams427ap24 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + + return dsi_ctx.accum_err; +} + +static int s6e88a0_ams427ap24_prepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + s6e88a0_ams427ap24_reset(ctx); + + ret = s6e88a0_ams427ap24_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + return ret; + } + + return 0; +} + +static int s6e88a0_ams427ap24_unprepare(struct drm_panel *panel) +{ + struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = s6e88a0_ams427ap24_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + ctx->supplies); + + return 0; +} + +static const struct drm_display_mode s6e88a0_ams427ap24_mode = { + .clock = (540 + 94 + 4 + 18) * (960 + 12 + 1 + 3) * 60 / 1000, + .hdisplay = 540, + .hsync_start = 540 + 94, + .hsync_end = 540 + 94 + 4, + .htotal = 540 + 94 + 4 + 18, + .vdisplay = 960, + .vsync_start = 960 + 12, + .vsync_end = 960 + 12 + 1, + .vtotal = 960 + 12 + 1 + 3, + .width_mm = 55, + .height_mm = 95, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int s6e88a0_ams427ap24_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, + &s6e88a0_ams427ap24_mode); +} + +static const struct drm_panel_funcs s6e88a0_ams427ap24_panel_funcs = { + .prepare = s6e88a0_ams427ap24_prepare, + .unprepare = s6e88a0_ams427ap24_unprepare, + .get_modes = s6e88a0_ams427ap24_get_modes, +}; + +static const struct backlight_ops s6e88a0_ams427ap24_bl_ops = { + .update_status = s6e88a0_ams427ap24_set_brightness, +}; + +static int s6e88a0_ams427ap24_register_backlight(struct s6e88a0_ams427ap24 *ctx) +{ + struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 180, + .max_brightness = 255, + }; + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret = 0; + + ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev), dev, ctx, + &s6e88a0_ams427ap24_bl_ops, + &props); + if 
(IS_ERR(ctx->bl_dev)) { + ret = PTR_ERR(ctx->bl_dev); + dev_err(dev, "error registering backlight device (%d)\n", ret); + } + + return ret; +} + +static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e88a0_ams427ap24 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(s6e88a0_ams427ap24_supplies), + s6e88a0_ams427ap24_supplies, + &ctx->supplies); + if (ret < 0) + return ret; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP; + + drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal"); + + ret = s6e88a0_ams427ap24_register_backlight(ctx); + if (ret < 0) + return ret; + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void s6e88a0_ams427ap24_remove(struct mipi_dsi_device *dsi) +{ + struct s6e88a0_ams427ap24 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id s6e88a0_ams427ap24_of_match[] = { + { .compatible = "samsung,s6e88a0-ams427ap24" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s6e88a0_ams427ap24_of_match); + +static struct mipi_dsi_driver s6e88a0_ams427ap24_driver = { + .probe = s6e88a0_ams427ap24_probe, + .remove = s6e88a0_ams427ap24_remove, + .driver = { + .name = "panel-s6e88a0-ams427ap24", + .of_match_table = s6e88a0_ams427ap24_of_match, + }, +}; +module_mipi_dsi_driver(s6e88a0_ams427ap24_driver); + +MODULE_AUTHOR("Jakob Hauser <jahau@rocketmail.com>"); +MODULE_DESCRIPTION("Samsung AMS427AP24 panel with S6E88A0 controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 86735430462f..06381c628209 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4565,6 +4565,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode mchp_ac69t88a_mode = { + .clock = 25000, + .hdisplay = 800, + .hsync_start = 800 + 88, + .hsync_end = 800 + 88 + 5, + .htotal = 800 + 88 + 5 + 40, + .vdisplay = 480, + .vsync_start = 480 + 23, + .vsync_end = 480 + 23 + 5, + .vtotal = 480 + 23 + 5 + 1, +}; + +static const struct panel_desc mchp_ac69t88a = { + .modes = &mchp_ac69t88a_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode arm_rtsm_mode[] = { { .clock = 65000, @@ -5049,6 +5074,9 @@ static const struct of_device_id platform_of_match[] = { .compatible 
= "yes-optoelectronics,ytc700tlag-05-201c", .data = &yes_optoelectronics_ytc700tlag_05_201c, }, { + .compatible = "microchip,ac69t88a", + .data = &mchp_ac69t88a, + }, { /* Must be the last entry */ .compatible = "panel-dpi", .data = &panel_dpi, diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index 217f03569494..d437f5c84f5f 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -562,8 +562,7 @@ static int acx565akm_detect(struct acx565akm_panel *lcd) lcd->enabled ? "enabled" : "disabled ", status); acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3); - dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n", - lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]); + dev_dbg(&lcd->spi->dev, "MIPI display ID: %3phN\n", lcd->display_id); switch (lcd->display_id[0]) { case 0x10: diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 2d30da38c2c3..3385fd3ef41a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -38,7 +38,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, return PTR_ERR(opp); dev_pm_opp_put(opp); - err = dev_pm_opp_set_rate(dev, *freq); + err = dev_pm_opp_set_rate(dev, *freq); if (!err) ptdev->pfdevfreq.current_frequency = *freq; @@ -182,6 +182,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 671eed4ad890..04d615df5259 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -3,6 +3,10 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. 
*/ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/module.h> #include <linux/of.h> #include <linux/pagemap.h> @@ -21,13 +25,33 @@ #include "panfrost_gpu.h" #include "panfrost_perfcnt.h" +#define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT) + static bool unstable_ioctls; module_param_unsafe(unstable_ioctls, bool, 0600); +static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, + u64 *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(pfdev->dev); + if (ret) + return ret; + + panfrost_cycle_counter_get(pfdev); + *arg = panfrost_timestamp_read(pfdev); + panfrost_cycle_counter_put(pfdev); + + pm_runtime_put(pfdev->dev); + return 0; +} + static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panfrost_get_param *param = data; struct panfrost_device *pfdev = ddev->dev_private; + int ret; if (param->pad != 0) return -EINVAL; @@ -69,6 +93,21 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15); PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups); PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc); + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP: + ret = panfrost_ioctl_query_timestamp(pfdev, &param->value); + if (ret) + return ret; + break; + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY: +#ifdef CONFIG_ARM_ARCH_TIMER + param->value = arch_timer_get_cntfrq(); +#else + param->value = 0; +#endif + break; + default: return -EINVAL; } @@ -245,7 +284,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, if (!args->jc) return -EINVAL; - if (args->requirements && args->requirements != PANFROST_JD_REQ_FS) + if (args->requirements & ~JOB_REQUIREMENTS) return -EINVAL; if (args->out_sync > 0) { @@ -584,6 +623,8 @@ static const struct file_operations panfrost_drm_driver_fops = { * - 1.0 - initial interface * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO * - 1.2 - adds AFBC_FEATURES query + * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT + * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries */ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -597,7 +638,7 @@ static const struct drm_driver panfrost_drm_driver = { .desc = "panfrost DRM", .date = "20180908", .major = 1, - .minor = 2, + .minor = 3, .gem_create_object = panfrost_gem_create_object, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index fd8e44992184..f5abde3866fb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -177,7 +177,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) struct panfrost_model { const char *name; u32 id; - u32 id_mask; u64 features; u64 issues; struct { @@ -380,6 +379,18 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev) return ((u64)hi << 32) | lo; } +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev) +{ + u32 hi, lo; + + do { + hi = gpu_read(pfdev, GPU_TIMESTAMP_HI); + lo = gpu_read(pfdev, GPU_TIMESTAMP_LO); + } while (hi != gpu_read(pfdev, GPU_TIMESTAMP_HI)); + + return ((u64)hi << 32) | lo; +} + static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) { u64 core_mask;
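panfrost_timestamp_read() above uses the standard idiom for sampling a
free-running 64-bit counter exposed as two 32-bit registers: read HI,
then LO, then re-read HI and retry if it changed, so a carry from the
low word into the high word between the two reads can never pair a
stale high half with a fresh low half. A minimal standalone sketch of
the pattern, with read_hi()/read_lo() as hypothetical stand-ins for the
two MMIO reads (gpu_read(pfdev, GPU_TIMESTAMP_HI/LO) in the driver):

#include <stdint.h>

static uint64_t read_counter64(uint32_t (*read_hi)(void),
			       uint32_t (*read_lo)(void))
{
	uint32_t hi, lo;

	do {
		hi = read_hi();
		lo = read_lo();
		/* retry if the low word carried into the high word */
	} while (hi != read_hi());

	return ((uint64_t)hi << 32) | lo;
}

diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h 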
b/drivers/gpu/drm/panfrost/panfrost_gpu.h index d841b86504ea..b4fef11211d5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -20,6 +20,7 @@ void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev); void panfrost_cycle_counter_get(struct panfrost_device *pfdev); void panfrost_cycle_counter_put(struct panfrost_device *pfdev); unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev); +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev); void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index df49d37d0e7e..9b8e82fb8bc4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -159,16 +159,17 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) struct panfrost_job *job = pfdev->jobs[slot][0]; WARN_ON(!job); - if (job->is_profiled) { - if (job->engine_usage) { - job->engine_usage->elapsed_ns[slot] += - ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); - job->engine_usage->cycles[slot] += - panfrost_cycle_counter_read(pfdev) - job->start_cycles; - } - panfrost_cycle_counter_put(job->pfdev); + + if (job->is_profiled && job->engine_usage) { + job->engine_usage->elapsed_ns[slot] += + ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); + job->engine_usage->cycles[slot] += + panfrost_cycle_counter_read(pfdev) - job->start_cycles; } + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled) + panfrost_cycle_counter_put(pfdev); + pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; pfdev->jobs[slot][1] = NULL; @@ -243,9 +244,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) subslot = panfrost_enqueue_job(pfdev, js, job); /* Don't queue the job if a reset is in progress */ if (!atomic_read(&pfdev->reset.pending)) { - if (pfdev->profile_mode) { + job->is_profiled = pfdev->profile_mode; + + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + job->is_profiled) panfrost_cycle_counter_get(pfdev); - job->is_profiled = true; + + if (job->is_profiled) { job->start_time = ktime_get(); job->start_cycles = panfrost_cycle_counter_read(pfdev); } @@ -693,7 +698,8 @@ panfrost_reset(struct panfrost_device *pfdev, spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { - if (pfdev->jobs[i][j]->is_profiled) + if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); pm_runtime_put_noidle(pfdev->dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); @@ -727,7 +733,7 @@ panfrost_reset(struct panfrost_device *pfdev, /* Restart the schedulers */ for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_start(&pfdev->js->queue[i].sched); + drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. 
*/ job_write(pfdev, JOB_INT_MASK, diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index c25743b05c55..c7bba476ab3f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -78,6 +78,8 @@ #define GPU_CYCLE_COUNT_LO 0x90 #define GPU_CYCLE_COUNT_HI 0x94 +#define GPU_TIMESTAMP_LO 0x98 +#define GPU_TIMESTAMP_HI 0x9C #define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ #define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c index c6d3c327cc24..ecc7a52bd688 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.c +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c @@ -62,14 +62,20 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq) static int panthor_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { + struct panthor_device *ptdev = dev_get_drvdata(dev); struct dev_pm_opp *opp; + int err; opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp); dev_pm_opp_put(opp); - return dev_pm_opp_set_rate(dev, *freq); + err = dev_pm_opp_set_rate(dev, *freq); + if (!err) + ptdev->current_frequency = *freq; + + return err; } static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq) @@ -130,6 +136,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev) struct panthor_devfreq *pdevfreq; struct dev_pm_opp *opp; unsigned long cur_freq; + unsigned long freq = ULONG_MAX; int ret; pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL); @@ -156,12 +163,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev) cur_freq = clk_get_rate(ptdev->clks.core); - opp = devfreq_recommended_opp(dev, &cur_freq, 0); - if (IS_ERR(opp)) - return PTR_ERR(opp); - - panthor_devfreq_profile.initial_freq = cur_freq; - /* Regulator coupling only takes care of synchronizing/balancing voltage * updates, but the coupled regulator needs to be enabled manually. * @@ -192,16 +193,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev) return ret; } + opp = devfreq_recommended_opp(dev, &cur_freq, 0); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + panthor_devfreq_profile.initial_freq = cur_freq; + ptdev->current_frequency = cur_freq; + /* * Set the recommend OPP this will enable and configure the regulator * if any and will avoid a switch off by regulator_late_cleanup() */ ret = dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); if (ret) { DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n"); return ret; } + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (IS_ERR(opp)) + return PTR_ERR(opp); + ptdev->fast_rate = freq; + dev_pm_opp_put(opp); /* diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index e388c0472ba7..0e68f5a70d20 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -67,6 +67,25 @@ struct panthor_irq { }; /** + * enum panthor_device_profiling_mode - Profiling state + */ +enum panthor_device_profiling_flags { + /** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */ + PANTHOR_DEVICE_PROFILING_DISABLED = 0, + + /** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */ + PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0), + + /** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. 
*/ + PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1), + + /** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */ + PANTHOR_DEVICE_PROFILING_ALL = + PANTHOR_DEVICE_PROFILING_CYCLES | + PANTHOR_DEVICE_PROFILING_TIMESTAMP, +}; + +/** * struct panthor_device - Panthor device */ struct panthor_device { @@ -162,6 +181,20 @@ struct panthor_device { */ struct page *dummy_latest_flush; } pm; + + /** @profile_mask: User-set profiling flags for job accounting. */ + u32 profile_mask; + + /** @current_frequency: Device clock frequency at present. Set by DVFS*/ + unsigned long current_frequency; + + /** @fast_rate: Maximum device clock frequency. Set by DVFS */ + unsigned long fast_rate; +}; + +struct panthor_gpu_usage { + u64 time; + u64 cycles; }; /** @@ -176,6 +209,9 @@ struct panthor_file { /** @groups: Scheduling group pool attached to this file. */ struct panthor_group_pool *groups; + + /** @stats: cycle and timestamp measures for job execution. */ + struct panthor_gpu_usage stats; }; int panthor_device_init(struct panthor_device *ptdev); diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index c520f156e2d7..ac7e53f6e3f0 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -3,12 +3,17 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. */ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/list.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/pagemap.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/time64.h> #include <drm/drm_auth.h> #include <drm/drm_debugfs.h> @@ -165,6 +170,8 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride, _Generic(_obj_name, \ PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \ PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \ + PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \ + PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \ PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \ @@ -751,10 +758,63 @@ static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx, kvfree(ctx->jobs); } +static int panthor_query_timestamp_info(struct panthor_device *ptdev, + struct drm_panthor_timestamp_info *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(ptdev->base.dev); + if (ret) + return ret; + +#ifdef CONFIG_ARM_ARCH_TIMER + arg->timestamp_frequency = arch_timer_get_cntfrq(); +#else + arg->timestamp_frequency = 0; +#endif + arg->current_timestamp = panthor_gpu_read_timestamp(ptdev); + arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev); + + pm_runtime_put(ptdev->base.dev); + return 0; +} + +static int group_priority_permit(struct drm_file *file, + u8 priority) +{ + /* Ensure that priority is valid */ + if (priority > PANTHOR_GROUP_PRIORITY_REALTIME) + return -EINVAL; + + /* Medium priority and below are always allowed */ + if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM) + return 0; + + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ + if (capable(CAP_SYS_NICE) || drm_is_current_master(file)) + return 0; + + return -EACCES; +} + +static void panthor_query_group_priorities_info(struct drm_file *file, + struct drm_panthor_group_priorities_info *arg) +{ + int prio; + + for (prio = 
PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) { + if (!group_priority_permit(file, prio)) + arg->allowed_mask |= BIT(prio); + } +} + static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file) { struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); struct drm_panthor_dev_query *args = data; + struct drm_panthor_timestamp_info timestamp_info; + struct drm_panthor_group_priorities_info priorities_info; + int ret; if (!args->pointer) { switch (args->type) { @@ -766,6 +826,14 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d args->size = sizeof(ptdev->csif_info); return 0; + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + args->size = sizeof(timestamp_info); + return 0; + + case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: + args->size = sizeof(priorities_info); + return 0; + default: return -EINVAL; } @@ -778,6 +846,18 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d case DRM_PANTHOR_DEV_QUERY_CSIF_INFO: return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info); + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + ret = panthor_query_timestamp_info(ptdev, &timestamp_info); + + if (ret) + return ret; + + return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info); + + case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: + panthor_query_group_priorities_info(file, &priorities_info); + return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info); + default: return -EINVAL; } @@ -997,24 +1077,6 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data, return panthor_group_destroy(pfile, args->group_handle); } -static int group_priority_permit(struct drm_file *file, - u8 priority) -{ - /* Ensure that priority is valid */ - if (priority > PANTHOR_GROUP_PRIORITY_HIGH) - return -EINVAL; - - /* Medium priority and below are always allowed */ - if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM) - return 0; - - /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ - if (capable(CAP_SYS_NICE) || drm_is_current_master(file)) - return 0; - - return -EACCES; -} - static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, struct drm_file *file) { @@ -1374,6 +1436,37 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } +static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev, + struct panthor_file *pfile, + struct drm_printer *p) +{ + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL) + panthor_fdinfo_gather_group_samples(pfile); + + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) { +#ifdef CONFIG_ARM_ARCH_TIMER + drm_printf(p, "drm-engine-panthor:\t%llu ns\n", + DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC), + arch_timer_get_cntfrq())); +#endif + } + if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES) + drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles); + + drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate); + drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency); +} + +static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file) +{ + struct drm_device *dev = file->minor->dev; + struct panthor_device *ptdev = container_of(dev, struct panthor_device, base); + + panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p); + + drm_show_memory_stats(p, file); +} + static const struct file_operations panthor_drm_driver_fops = { .open = drm_open, .release = drm_release,
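The dev_query ioctl above follows the common two-call uAPI convention:
calling it with a NULL pointer only reports the expected struct size,
and a second call copies the data out, with PANTHOR_UOBJ_SET handling
size differences between kernel and userspace. A minimal userspace
sketch, assuming the drm_panthor_dev_query layout (type/size/pointer)
and the DRM_IOCTL_PANTHOR_DEV_QUERY request from the panthor_drm.h
uAPI header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static int query_timestamp_info(int fd, struct drm_panthor_timestamp_info *info)
{
	struct drm_panthor_dev_query q = {
		.type = DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,
		.pointer = 0, /* NULL pointer: size probe only */
	};

	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q))
		return -1;

	if (q.size > sizeof(*info)) /* newer kernel struct: take what we know */
		q.size = sizeof(*info);
	q.pointer = (uintptr_t)info;

	return ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
}

@@ -1383,6 +1476,7 @@ static 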
const struct file_operations panthor_drm_driver_fops = { .read = drm_read, .llseek = noop_llseek, .mmap = panthor_mmap, + .show_fdinfo = drm_show_fdinfo, .fop_flags = FOP_UNSIGNED_OFFSET, }; @@ -1396,12 +1490,16 @@ static void panthor_debugfs_init(struct drm_minor *minor) /* * PanCSF driver version: * - 1.0 - initial interface + * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query + * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query + * - adds PANTHOR_GROUP_PRIORITY_REALTIME priority */ static const struct drm_driver panthor_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA, .open = panthor_open, .postclose = panthor_postclose, + .show_fdinfo = panthor_show_fdinfo, .ioctls = panthor_drm_driver_ioctls, .num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls), .fops = &panthor_drm_driver_fops, @@ -1409,7 +1507,7 @@ static const struct drm_driver panthor_drm_driver = { .desc = "Panthor DRM driver", .date = "20230801", .major = 1, - .minor = 0, + .minor = 2, .gem_create_object = panthor_gem_create_object, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, @@ -1439,6 +1537,44 @@ static void panthor_remove(struct platform_device *pdev) panthor_device_unplug(ptdev); } +static ssize_t profiling_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", ptdev->profile_mask); +} + +static ssize_t profiling_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct panthor_device *ptdev = dev_get_drvdata(dev); + u32 value; + int err; + + err = kstrtou32(buf, 0, &value); + if (err) + return err; + + if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0) + return -EINVAL; + + ptdev->profile_mask = value; + + return len; +} + +static DEVICE_ATTR_RW(profiling); + +static struct attribute *panthor_attrs[] = { + &dev_attr_profiling.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(panthor); + static const struct of_device_id dt_match[] = { { .compatible = "rockchip,rk3588-mali" }, { .compatible = "arm,mali-valhall-csf" }, @@ -1458,6 +1594,7 @@ static struct platform_driver panthor_driver = { .name = "panthor", .pm = pm_ptr(&panthor_pm_ops), .of_match_table = dt_match, + .dev_groups = panthor_groups, }, }; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 4e2d3a02ea06..ecca5565ce41 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -78,6 +78,12 @@ enum panthor_fw_binary_entry_type { /** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */ CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4, + + /** + * @CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: Metadata about how + * the FW binary was built. 
+ */ + CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA = 6 }; #define CSF_FW_BINARY_ENTRY_TYPE(ehdr) ((ehdr) & 0xff) @@ -132,6 +138,13 @@ struct panthor_fw_binary_section_entry_hdr { } data; }; +struct panthor_fw_build_info_hdr { + /** @meta_start: Offset of the build info data in the FW binary */ + u32 meta_start; + /** @meta_size: Size of the build info data in the FW binary */ + u32 meta_size; +}; + /** * struct panthor_fw_binary_iter - Firmware binary iterator * @@ -628,6 +641,46 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, return 0; } +static int panthor_fw_read_build_info(struct panthor_device *ptdev, + const struct firmware *fw, + struct panthor_fw_binary_iter *iter, + u32 ehdr) +{ + struct panthor_fw_build_info_hdr hdr; + char header[9]; + const char git_sha_header[sizeof(header)] = "git_sha: "; + int ret; + + ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr)); + if (ret) + return ret; + + if (hdr.meta_start > fw->size || + hdr.meta_start + hdr.meta_size > fw->size) { + drm_err(&ptdev->base, "Firmware build info corrupt\n"); + /* We don't need the build info, so continue */ + return 0; + } + + if (memcmp(git_sha_header, fw->data + hdr.meta_start, + sizeof(git_sha_header))) { + /* Not the expected header, this isn't metadata we understand */ + return 0; + } + + /* Check that the git SHA is NULL terminated as expected */ + if (fw->data[hdr.meta_start + hdr.meta_size - 1] != '\0') { + drm_warn(&ptdev->base, "Firmware's git sha is not NULL terminated\n"); + /* Don't treat as fatal */ + return 0; + } + + drm_info(&ptdev->base, "Firmware git sha: %s\n", + fw->data + hdr.meta_start + sizeof(git_sha_header)); + + return 0; +} + static void panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload) { @@ -672,6 +725,8 @@ static int panthor_fw_load_entry(struct panthor_device *ptdev, switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) { case CSF_FW_BINARY_ENTRY_TYPE_IFACE: return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr); + case CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: + return panthor_fw_read_build_info(ptdev, fw, &eiter, ehdr); /* FIXME: handle those entry types? 
*/ case CSF_FW_BINARY_ENTRY_TYPE_CONFIG: @@ -921,7 +976,7 @@ static int panthor_fw_init_ifaces(struct panthor_device *ptdev) return ret; } - drm_info(&ptdev->base, "CSF FW v%d.%d.%d, Features %#x Instrumentation features %#x", + drm_info(&ptdev->base, "CSF FW using interface v%d.%d.%d, Features %#x Instrumentation features %#x", CSF_IFACE_VERSION_MAJOR(glb_iface->control->version), CSF_IFACE_VERSION_MINOR(glb_iface->control->version), CSF_IFACE_VERSION_PATCH(glb_iface->control->version), diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index be97d56bc011..8244a4e6c2a2 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -150,6 +150,17 @@ panthor_gem_prime_export(struct drm_gem_object *obj, int flags) return drm_gem_prime_export(obj, flags); } +static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj) +{ + struct panthor_gem_object *bo = to_panthor_bo(obj); + enum drm_gem_object_status res = 0; + + if (bo->base.base.import_attach || bo->base.pages) + res |= DRM_GEM_OBJECT_RESIDENT; + + return res; +} + static const struct drm_gem_object_funcs panthor_gem_funcs = { .free = panthor_gem_free_object, .print_info = drm_gem_shmem_object_print_info, @@ -159,6 +170,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = panthor_gem_mmap, + .status = panthor_gem_status, .export = panthor_gem_prime_export, .vm_ops = &drm_gem_shmem_vm_ops, }; diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 5251d8764e7d..2d3529a0b156 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -480,3 +480,50 @@ void panthor_gpu_resume(struct panthor_device *ptdev) panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); panthor_gpu_l2_power_on(ptdev); } + +/** + * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset. + * @ptdev: Device. + * @reg: The offset of the register to read. + * + * Return: The counter value. + */ +static u64 +panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg) +{ + u32 hi, lo; + + do { + hi = gpu_read(ptdev, reg + 0x4); + lo = gpu_read(ptdev, reg); + } while (hi != gpu_read(ptdev, reg + 0x4)); + + return ((u64)hi << 32) | lo; +} + +/** + * panthor_gpu_read_timestamp() - Read the timestamp register. + * @ptdev: Device. + * + * Return: The GPU timestamp value. + */ +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev) +{ + return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO); +} + +/** + * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register. + * @ptdev: Device. + * + * Return: The GPU timestamp offset value. 
+ */ +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev) +{ + u32 hi, lo; + + hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI); + lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO); + + return ((u64)hi << 32) | lo; +} diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index bba7555dd3c6..7f6133a66127 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -5,6 +5,8 @@ #ifndef __PANTHOR_GPU_H__ #define __PANTHOR_GPU_H__ +#include <linux/types.h> + struct panthor_device; int panthor_gpu_init(struct panthor_device *ptdev); @@ -48,5 +50,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); int panthor_gpu_soft_reset(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev); #endif diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 0e6f94df690d..a49132f3778b 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -841,7 +841,7 @@ static void panthor_vm_stop(struct panthor_vm *vm) static void panthor_vm_start(struct panthor_vm *vm) { - drm_sched_start(&vm->sched); + drm_sched_start(&vm->sched, 0); } /** @@ -2730,9 +2730,9 @@ int panthor_mmu_init(struct panthor_device *ptdev) * which passes iova as an unsigned long. Patch the mmu_features to reflect this * limitation. */ - if (sizeof(unsigned long) * 8 < va_bits) { + if (va_bits > BITS_PER_LONG) { ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0); - ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8; + ptdev->gpu_info.mmu_features |= BITS_PER_LONG; } return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 9929e22f4d8d..ef4bec7ff9c7 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -93,6 +93,9 @@ #define MIN_CSGS 3 #define MAX_CSG_PRIO 0xf +#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64)) +#define MAX_INSTRS_PER_JOB 24 + struct panthor_group; /** @@ -137,8 +140,6 @@ enum panthor_csg_priority { * non-real-time groups. When such a group becomes executable, * it will evict the group with the lowest non-rt priority if * there's no free group slot available. - * - * Currently not exposed to userspace. */ PANTHOR_CSG_PRIORITY_RT, @@ -476,6 +477,18 @@ struct panthor_queue { */ struct list_head in_flight_jobs; } fence_ctx; + + /** @profiling: Job profiling data slots and access information. */ + struct { + /** @slots: Kernel BO holding the slots. */ + struct panthor_kernel_bo *slots; + + /** @slot_count: Number of jobs ringbuffer can hold at once. */ + u32 slot_count; + + /** @seqno: Index of the next available profiling information slot. */ + u32 seqno; + } profiling; }; /** @@ -605,6 +618,18 @@ struct panthor_group { */ struct panthor_kernel_bo *syncobjs; + /** @fdinfo: Per-file total cycle and timestamp values reference. */ + struct { + /** @data: Total sampled values for jobs in queues from this group. */ + struct panthor_gpu_usage data; + + /** + * @lock: Mutex to govern concurrent access from drm file's fdinfo callback + * and job post-completion processing function + */ + struct mutex lock; + } fdinfo; + /** @state: Group state. 
*/ enum panthor_group_state state; @@ -662,6 +687,18 @@ struct panthor_group { struct list_head wait_node; }; +struct panthor_job_profiling_data { + struct { + u64 before; + u64 after; + } cycles; + + struct { + u64 before; + u64 after; + } time; +}; + /** * group_queue_work() - Queue a group work * @group: Group to queue the work for. @@ -775,6 +812,15 @@ struct panthor_job { /** @done_fence: Fence signaled when the job is finished or cancelled. */ struct dma_fence *done_fence; + + /** @profiling: Job profiling information. */ + struct { + /** @mask: Current device job profiling enablement bitmask. */ + u32 mask; + + /** @slot: Job index in the profiling slots BO. */ + u32 slot; + } profiling; }; static void @@ -839,6 +885,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * panthor_kernel_bo_destroy(queue->ringbuf); panthor_kernel_bo_destroy(queue->iface.mem); + panthor_kernel_bo_destroy(queue->profiling.slots); /* Release the last_fence we were holding, if any. */ dma_fence_put(queue->fence_ctx.last_fence); @@ -853,6 +900,8 @@ static void group_release_work(struct work_struct *work) release_work); u32 i; + mutex_destroy(&group->fdinfo.lock); + for (i = 0; i < group->queue_count; i++) group_free_queue(group, group->queues[i]); @@ -1989,8 +2038,6 @@ tick_ctx_init(struct panthor_scheduler *sched, } } -#define NUM_INSTRS_PER_SLOT 16 - static void group_term_post_processing(struct panthor_group *group) { @@ -2546,7 +2593,7 @@ static void queue_start(struct panthor_queue *queue) list_for_each_entry(job, &queue->scheduler.pending_list, base.list) job->base.s_fence->parent = dma_fence_get(job->done_fence); - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } static void panthor_group_stop(struct panthor_group *group) @@ -2790,6 +2837,41 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed) } } +static void update_fdinfo_stats(struct panthor_job *job) +{ + struct panthor_group *group = job->group; + struct panthor_queue *queue = group->queues[job->queue_idx]; + struct panthor_gpu_usage *fdinfo = &group->fdinfo.data; + struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; + struct panthor_job_profiling_data *data = &slots[job->profiling.slot]; + + mutex_lock(&group->fdinfo.lock); + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) + fdinfo->cycles += data->cycles.after - data->cycles.before; + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) + fdinfo->time += data->time.after - data->time.before; + mutex_unlock(&group->fdinfo.lock); +} + +void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) +{ + struct panthor_group_pool *gpool = pfile->groups; + struct panthor_group *group; + unsigned long i; + + if (IS_ERR_OR_NULL(gpool)) + return; + + xa_for_each(&gpool->xa, i, group) { + mutex_lock(&group->fdinfo.lock); + pfile->stats.cycles += group->fdinfo.data.cycles; + pfile->stats.time += group->fdinfo.data.time; + group->fdinfo.data.cycles = 0; + group->fdinfo.data.time = 0; + mutex_unlock(&group->fdinfo.lock); + } +} + static void group_sync_upd_work(struct work_struct *work) { struct panthor_group *group = @@ -2822,6 +2904,8 @@ static void group_sync_upd_work(struct work_struct *work) dma_fence_end_signalling(cookie); list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { + if (job->profiling.mask) + update_fdinfo_stats(job); list_del_init(&job->node); panthor_job_put(&job->base); } @@ -2829,65 +2913,198 @@ static void group_sync_upd_work(struct 
work_struct *work) group_put(group); } -static struct dma_fence * -queue_run_job(struct drm_sched_job *sched_job) +struct panthor_job_ringbuf_instrs { + u64 buffer[MAX_INSTRS_PER_JOB]; + u32 count; +}; + +struct panthor_job_instr { + u32 profile_mask; + u64 instr; +}; + +#define JOB_INSTR(__prof, __instr) \ + { \ + .profile_mask = __prof, \ + .instr = __instr, \ + } + +static void +copy_instrs_to_ringbuf(struct panthor_queue *queue, + struct panthor_job *job, + struct panthor_job_ringbuf_instrs *instrs) +{ + u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); + u64 start = job->ringbuf.start & (ringbuf_size - 1); + u64 size, written; + + /* + * We need to write a whole slot, including any trailing zeroes + * that may come at the end of it. Also, because instrs.buffer has + * been zero-initialised, there's no need to pad it with 0's + */ + instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); + size = instrs->count * sizeof(u64); + WARN_ON(size > ringbuf_size); + written = min(ringbuf_size - start, size); + + memcpy(queue->ringbuf->kmap + start, instrs->buffer, written); + + if (written < size) + memcpy(queue->ringbuf->kmap, + &instrs->buffer[written / sizeof(u64)], + size - written); +} + +struct panthor_job_cs_params { + u32 profile_mask; + u64 addr_reg; u64 val_reg; + u64 cycle_reg; u64 time_reg; + u64 sync_addr; u64 times_addr; + u64 cs_start; u64 cs_size; + u32 last_flush; u32 waitall_mask; +}; + +static void +get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params) { - struct panthor_job *job = container_of(sched_job, struct panthor_job, base); struct panthor_group *group = job->group; struct panthor_queue *queue = group->queues[job->queue_idx]; struct panthor_device *ptdev = group->ptdev; struct panthor_scheduler *sched = ptdev->scheduler; - u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf); - u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1); - u64 addr_reg = ptdev->csif_info.cs_reg_count - - ptdev->csif_info.unpreserved_cs_reg_count; - u64 val_reg = addr_reg + 2; - u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + - job->queue_idx * sizeof(struct panthor_syncobj_64b); - u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); - struct dma_fence *done_fence; - int ret; - u64 call_instrs[NUM_INSTRS_PER_SLOT] = { - /* MOV32 rX+2, cs.latest_flush */ - (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush, + params->addr_reg = ptdev->csif_info.cs_reg_count - + ptdev->csif_info.unpreserved_cs_reg_count; + params->val_reg = params->addr_reg + 2; + params->cycle_reg = params->addr_reg; + params->time_reg = params->val_reg; - /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ - (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233, + params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + + job->queue_idx * sizeof(struct panthor_syncobj_64b); + params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) + + (job->profiling.slot * sizeof(struct panthor_job_profiling_data)); + params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0); - /* MOV48 rX:rX+1, cs.start */ - (1ull << 56) | (addr_reg << 48) | job->call_info.start, + params->cs_start = job->call_info.start; + params->cs_size = job->call_info.size; + params->last_flush = job->call_info.latest_flush; - /* MOV32 rX+2, cs.size */ - (2ull << 56) | (val_reg << 48) | job->call_info.size, + params->profile_mask = job->profiling.mask; +} - /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ - (3ull << 56) | (1 << 16), 
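
The removed block above is the end of the old hard-coded job slot; the JOB_INSTR_* macros and prepare_job_instrs() that follow rebuild it as a table that is filtered by the job's profiling mask and then padded to a cache-line multiple. A minimal userspace sketch of that filter-and-pad step, assuming a 64-byte cache line; all names and instruction values here are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_LINE_INSTRS (64 / sizeof(uint64_t)) /* 8 instructions per line */

struct instr {
	uint32_t profile_mask; /* 0 means "always emitted" */
	uint64_t instr;
};

/* Keep instructions whose mask is zero or overlaps the enabled mask,
 * then pad with zeroes (no-ops) up to the next cache-line boundary. */
static size_t filter_and_pad(const struct instr *seq, size_t n,
			     uint32_t enabled, uint64_t *out)
{
	size_t count = 0, padded;

	for (size_t i = 0; i < n; i++) {
		if (seq[i].profile_mask && !(seq[i].profile_mask & enabled))
			continue;
		out[count++] = seq[i].instr;
	}
	padded = (count + CACHE_LINE_INSTRS - 1) & ~(CACHE_LINE_INSTRS - 1);
	memset(&out[count], 0, (padded - count) * sizeof(*out));
	return padded;
}

int main(void)
{
	const struct instr seq[] = {
		{ 0x0, 0x01 }, /* always present */
		{ 0x1, 0x02 }, /* only when "cycles" profiling is enabled */
		{ 0x2, 0x03 }, /* only when "timestamp" profiling is enabled */
	};
	uint64_t buf[24] = { 0 };

	/* With profiling off only one instruction survives, padded to 8;
	 * this varying count is also why the job credit value depends on
	 * the profiling mask. */
	printf("%zu\n", filter_and_pad(seq, 3, 0x0, buf));
	return 0;
}
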
+#define JOB_INSTR_ALWAYS(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr)) +#define JOB_INSTR_TIMESTAMP(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr)) +#define JOB_INSTR_CYCLES(instr) \ + JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr)) +static void +prepare_job_instrs(const struct panthor_job_cs_params *params, + struct panthor_job_ringbuf_instrs *instrs) +{ + const struct panthor_job_instr instr_seq[] = { + /* MOV32 rX+2, cs.latest_flush */ + JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush), + /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */ + JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) | + (0 << 16) | 0x233), + /* MOV48 rX:rX+1, cycles_offset */ + JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, cycles.before))), + /* STORE_STATE cycles */ + JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), + /* MOV48 rX:rX+1, time_offset */ + JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, time.before))), + /* STORE_STATE timer */ + JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), + /* MOV48 rX:rX+1, cs.start */ + JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start), + /* MOV32 rX+2, cs.size */ + JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size), + /* WAIT(0) => waits for FLUSH_CACHE2 instruction */ + JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)), /* CALL rX:rX+1, rX+2 */ - (32ull << 56) | (addr_reg << 40) | (val_reg << 32), - + JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) | + (params->val_reg << 32)), + /* MOV48 rX:rX+1, cycles_offset */ + JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, cycles.after))), + /* STORE_STATE cycles */ + JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)), + /* MOV48 rX:rX+1, time_offset */ + JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) | + (params->times_addr + + offsetof(struct panthor_job_profiling_data, time.after))), + /* STORE_STATE timer */ + JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)), /* MOV48 rX:rX+1, sync_addr */ - (1ull << 56) | (addr_reg << 48) | sync_addr, - + JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr), /* MOV48 rX+2, #1 */ - (1ull << 56) | (val_reg << 48) | 1, - + JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1), /* WAIT(all) */ - (3ull << 56) | (waitall_mask << 16), - + JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)), /* SYNC_ADD64.system_scope.propage_err.nowait rX:rX+1, rX+2*/ - (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1, + JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) | + (params->val_reg << 32) | (0 << 16) | 1), + /* ERROR_BARRIER, so we can recover from faults at job boundaries. */ + JOB_INSTR_ALWAYS((47ull << 56)), + }; + u32 pad; - /* ERROR_BARRIER, so we can recover from faults at job - * boundaries. - */ - (47ull << 56), + instrs->count = 0; + + /* NEED to be cacheline aligned to please the prefetcher. */ + static_assert(sizeof(instrs->buffer) % 64 == 0, + "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline"); + + /* Make sure we have enough storage to store the whole sequence. 
*/ + static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) == + ARRAY_SIZE(instrs->buffer), + "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch"); + + for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) { + /* If the profile mask of this instruction is not enabled, skip it. */ + if (instr_seq[i].profile_mask && + !(instr_seq[i].profile_mask & params->profile_mask)) + continue; + + instrs->buffer[instrs->count++] = instr_seq[i].instr; + } + + pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE); + memset(&instrs->buffer[instrs->count], 0, + (pad - instrs->count) * sizeof(instrs->buffer[0])); + instrs->count = pad; +} + +static u32 calc_job_credits(u32 profile_mask) +{ + struct panthor_job_ringbuf_instrs instrs; + struct panthor_job_cs_params params = { + .profile_mask = profile_mask, }; - /* Need to be cacheline aligned to please the prefetcher. */ - static_assert(sizeof(call_instrs) % 64 == 0, - "call_instrs is not aligned on a cacheline"); + prepare_job_instrs(&params, &instrs); + return instrs.count; +} + +static struct dma_fence * +queue_run_job(struct drm_sched_job *sched_job) +{ + struct panthor_job *job = container_of(sched_job, struct panthor_job, base); + struct panthor_group *group = job->group; + struct panthor_queue *queue = group->queues[job->queue_idx]; + struct panthor_device *ptdev = group->ptdev; + struct panthor_scheduler *sched = ptdev->scheduler; + struct panthor_job_ringbuf_instrs instrs; + struct panthor_job_cs_params cs_params; + struct dma_fence *done_fence; + int ret; /* Stream size is zero, nothing to do except making sure all previously * submitted jobs are done before we signal the @@ -2914,17 +3131,23 @@ queue_run_job(struct drm_sched_job *sched_job) queue->fence_ctx.id, atomic64_inc_return(&queue->fence_ctx.seqno)); - memcpy(queue->ringbuf->kmap + ringbuf_insert, - call_instrs, sizeof(call_instrs)); + job->profiling.slot = queue->profiling.seqno++; + if (queue->profiling.seqno == queue->profiling.slot_count) + queue->profiling.seqno = 0; + + job->ringbuf.start = queue->iface.input->insert; + + get_job_cs_params(job, &cs_params); + prepare_job_instrs(&cs_params, &instrs); + copy_instrs_to_ringbuf(queue, job, &instrs); + + job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64)); panthor_job_get(&job->base); spin_lock(&queue->fence_ctx.lock); list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs); spin_unlock(&queue->fence_ctx.lock); - job->ringbuf.start = queue->iface.input->insert; - job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs); - /* Make sure the ring buffer is updated before the INSERT * register. */ @@ -3017,6 +3240,33 @@ static const struct drm_sched_backend_ops panthor_queue_sched_ops = { .free_job = queue_free_job, }; +static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, + u32 cs_ringbuf_size) +{ + u32 min_profiled_job_instrs = U32_MAX; + u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL); + + /* + * We want to calculate the minimum size of a profiled job's CS, + * because since they need additional instructions for the sampling + * of performance metrics, they might take up further slots in + * the queue's ringbuffer. This means we might not need as many job + * slots for keeping track of their profiling information. What we + * need is the maximum number of slots we should allocate to this end, + * which matches the maximum number of profiled jobs we can place + * simultaneously in the queue's ring buffer. 
+ * That has to be calculated separately for every single job profiling + * flag, but not in the case job profiling is disabled, since unprofiled + * jobs don't need to keep track of this at all. + */ + for (u32 i = 0; i < last_flag; i++) { + min_profiled_job_instrs = + min(min_profiled_job_instrs, calc_job_credits(BIT(i))); + } + + return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); +} + static struct panthor_queue * group_create_queue(struct panthor_group *group, const struct drm_panthor_queue_create *args) @@ -3070,9 +3320,35 @@ group_create_queue(struct panthor_group *group, goto err_free_queue; } + queue->profiling.slot_count = + calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size); + + queue->profiling.slots = + panthor_kernel_bo_create(group->ptdev, group->vm, + queue->profiling.slot_count * + sizeof(struct panthor_job_profiling_data), + DRM_PANTHOR_BO_NO_MMAP, + DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | + DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, + PANTHOR_VM_KERNEL_AUTO_VA); + + if (IS_ERR(queue->profiling.slots)) { + ret = PTR_ERR(queue->profiling.slots); + goto err_free_queue; + } + + ret = panthor_kernel_bo_vmap(queue->profiling.slots); + if (ret) + goto err_free_queue; + + /* + * Credit limit argument tells us the total number of instructions + * across all CS slots in the ringbuffer, with some jobs requiring + * twice as many as others, depending on their profiling status. + */ ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops, group->ptdev->scheduler->wq, 1, - args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)), + args->ringbuf_size / sizeof(u64), 0, msecs_to_jiffies(JOB_TIMEOUT_MS), group->ptdev->reset.wq, NULL, "panthor-queue", group->ptdev->base.dev); @@ -3213,6 +3489,8 @@ int panthor_group_create(struct panthor_file *pfile, } mutex_unlock(&sched->reset.lock); + mutex_init(&group->fdinfo.lock); + return gid; err_put_group: @@ -3380,6 +3658,7 @@ panthor_job_create(struct panthor_file *pfile, { struct panthor_group_pool *gpool = pfile->groups; struct panthor_job *job; + u32 credits; int ret; if (qsubmit->pad) @@ -3438,9 +3717,16 @@ panthor_job_create(struct panthor_file *pfile, } } + job->profiling.mask = pfile->ptdev->profile_mask; + credits = calc_job_credits(job->profiling.mask); + if (credits == 0) { + ret = -EINVAL; + goto err_put_job; + } + ret = drm_sched_job_init(&job->base, &job->group->queues[job->queue_idx]->entity, - 1, job->group); + credits, job->group); if (ret) goto err_put_job; diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h index 3a30d2328b30..5ae6b4bde7c5 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.h +++ b/drivers/gpu/drm/panthor/panthor_sched.h @@ -47,4 +47,6 @@ void panthor_sched_resume(struct panthor_device *ptdev); void panthor_sched_report_mmu_fault(struct panthor_device *ptdev); void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events); +void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile); + #endif diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig index 20fe1d2c0aaf..82e918820950 100644 --- a/drivers/gpu/drm/pl111/Kconfig +++ b/drivers/gpu/drm/pl111/Kconfig @@ -5,6 +5,7 @@ config DRM_PL111 depends on ARM || ARM64 || COMPILE_TEST depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_BRIDGE diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 
02e6b74d5016..13362150b9c6 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -47,6 +47,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> @@ -225,6 +226,7 @@ static const struct drm_driver pl111_drm_driver = { .patchlevel = 0, .dumb_create = drm_gem_dma_dumb_create, .gem_prime_import_sg_table = pl111_gem_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, @@ -305,7 +307,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev, if (ret < 0) goto dev_put; - drm_fbdev_dma_setup(drm, priv->variant->fb_depth); + drm_client_setup_with_color_mode(drm, priv->variant->fb_depth); return 0; diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index ca3f51c2a8fe..98a148bea628 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_QXL tristate "QXL virtual GPU" - depends on DRM && PCI && MMU + depends on DRM && PCI && MMU && HAS_IOPORT + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_TTM select DRM_TTM_HELPER diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 5eb3f5719fdf..21f752644242 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -29,13 +29,14 @@ #include "qxl_drv.h" +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vgaarb.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_file.h> @@ -91,7 +92,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, qxl_driver.name); if (ret) goto disable_pci; @@ -118,7 +119,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto modeset_cleanup; - drm_fbdev_ttm_setup(&qdev->ddev, 32); + drm_client_setup(&qdev->ddev, NULL); return 0; modeset_cleanup: @@ -293,6 +294,7 @@ static struct drm_driver qxl_driver = { .debugfs_init = qxl_debugfs_init, #endif .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, + DRM_FBDEV_TTM_DRIVER_OPS, .fops = &qxl_fops, .ioctls = qxl_ioctls, .num_ioctls = ARRAY_SIZE(qxl_ioctls), diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index f98356be0af2..9c6c74a75778 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -5,6 +5,7 @@ config DRM_RADEON depends on DRM && PCI && MMU depends on AGP || !AGP select FW_LOADER + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 1b2d31c4d77c..ac77d1246b94 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = radeon_get_ib_value(p, idx+1) << 8; + offset = (u64)radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", offset, 
track->vgt_strmout_bo_offset[idx_value]); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 554b236c2328..6f071e61f764 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -35,6 +35,7 @@ #include <linux/vgaarb.h> #include <drm/drm_cache.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> #include <drm/drm_file.h> @@ -1542,7 +1543,7 @@ void radeon_device_fini(struct radeon_device *rdev) * Called at driver suspend. */ int radeon_suspend_kms(struct drm_device *dev, bool suspend, - bool fbcon, bool freeze) + bool notify_clients, bool freeze) { struct radeon_device *rdev; struct pci_dev *pdev; @@ -1634,9 +1635,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (fbcon) { + if (notify_clients) { console_lock(); - radeon_fbdev_set_suspend(rdev, 1); + drm_client_dev_suspend(dev, true); console_unlock(); } return 0; @@ -1649,7 +1650,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, * Returns 0 for success or an error on failure. * Called at driver resume. */ -int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; @@ -1660,14 +1661,14 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { + if (notify_clients) { console_lock(); } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pci_enable_device(pdev)) { - if (fbcon) + if (notify_clients) console_unlock(); return -1; } @@ -1730,7 +1731,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ - if (fbcon) { + if (notify_clients) { drm_helper_resume_force_mode(dev); /* turn on display hw */ drm_modeset_lock_all(dev); @@ -1746,8 +1747,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); + if (notify_clients) { + drm_client_dev_resume(dev, true); console_unlock(); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e5a6f3e7c75b..23d6d1a2586d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -29,7 +29,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ - +#include <linux/aperture.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,9 +37,10 @@ #include <linux/mmu_notifier.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> #include <drm/drm_pciids.h> @@ -261,6 +262,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, unsigned long flags = 0; struct drm_device *ddev; struct radeon_device *rdev; + const struct drm_format_info *format; int ret; if (!ent) @@ -297,7 +299,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, kms_driver.name); if (ret) return ret; @@ -324,7 +326,14 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) goto err_agp; - radeon_fbdev_setup(ddev->dev_private); + if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_C8); + else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_RGB565); + else + format = NULL; + + drm_client_setup(ddev, format); return 0; @@ -591,6 +600,8 @@ static const struct drm_driver kms_driver = { .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, + RADEON_FBDEV_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index fb70de29545c..d4a58bd679db 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -198,12 +198,11 @@ static const struct fb_ops radeon_fbdev_fb_ops = { .fb_destroy = radeon_fbdev_fb_destroy, }; -/* - * Fbdev helpers and struct drm_fb_helper_funcs - */ +static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { +}; -static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_mode_fb_cmd2 mode_cmd = { }; @@ -243,6 +242,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, } /* setup helper */ + fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; fb_helper->fb = fb; /* okay we have an object now allocate the framebuffer */ @@ -288,116 +288,6 @@ err_radeon_fbdev_destroy_pinned_object: return ret; } -static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { - .fb_probe = radeon_fbdev_fb_helper_fb_probe, -}; - -/* - * Fbdev client and struct drm_client_funcs - */ - -static void radeon_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = fb_helper->dev; - struct radeon_device *rdev = dev->dev_private; - - if (fb_helper->info) { - vga_switcheroo_client_fb_set(rdev->pdev, NULL); - drm_helper_force_disable_all(dev); - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int radeon_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - vga_switcheroo_process_delayed_switch(); 
- - return 0; -} - -static int radeon_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - struct radeon_device *rdev = dev->dev_private; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - vga_switcheroo_client_fb_set(rdev->pdev, fb_helper->info); - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup radeon fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs radeon_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = radeon_fbdev_client_unregister, - .restore = radeon_fbdev_client_restore, - .hotplug = radeon_fbdev_client_hotplug, -}; - -void radeon_fbdev_setup(struct radeon_device *rdev) -{ - struct drm_fb_helper *fb_helper; - int bpp_sel = 32; - int ret; - - if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) - bpp_sel = 8; - else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) - bpp_sel = 16; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(rdev_to_drm(rdev), fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs); - - ret = drm_client_init(rdev_to_drm(rdev), &fb_helper->client, "radeon-fbdev", - &radeon_fbdev_client_funcs); - if (ret) { - drm_err(rdev_to_drm(rdev), "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} - -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ - if (rdev_to_drm(rdev)->fb_helper) - drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state); -} - bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 421c83fc70dc..4063d3801e81 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -38,6 +38,9 @@ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct edid; struct drm_edid; struct radeon_bo; @@ -935,14 +938,14 @@ void dce8_program_fmt(struct drm_encoder *encoder); /* fbdev layer */ #if defined(CONFIG_DRM_FBDEV_EMULATION) -void radeon_fbdev_setup(struct radeon_device *rdev); -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = radeon_fbdev_driver_fbdev_probe bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); #else -static inline void radeon_fbdev_setup(struct radeon_device *rdev) -{ } -static inline void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ } +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { return false; diff --git 
a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig index e1f41468a9a6..840305fdeb49 100644 --- a/drivers/gpu/drm/renesas/rcar-du/Kconfig +++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig @@ -4,6 +4,7 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST depends on ARCH_RENESAS || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index fb719d9aff10..4e0bafc86f50 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -19,6 +19,7 @@ #include <linux/wait.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -606,6 +607,7 @@ static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rcar_du_dumb_create, .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rcar_du_fops, .name = "rcar-du", .desc = "Renesas R-Car Display Unit", @@ -716,7 +718,7 @@ static int rcar_du_probe(struct platform_device *pdev) drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev)); - drm_fbdev_dma_setup(&rcdu->ddev, 32); + drm_client_setup(&rcdu->ddev, NULL); return 0; diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c index e445fac8e0b4..c546ab0805d6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c @@ -680,6 +680,12 @@ static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = { .atomic_update = rcar_du_plane_atomic_update, }; +static const struct drm_plane_helper_funcs rcar_du_primary_plane_helper_funcs = { + .atomic_check = rcar_du_plane_atomic_check, + .atomic_update = rcar_du_plane_atomic_update, + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, +}; + static struct drm_plane_state * rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane) { @@ -812,8 +818,12 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) if (ret < 0) return ret; - drm_plane_helper_add(&plane->plane, - &rcar_du_plane_helper_funcs); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&plane->plane, + &rcar_du_primary_plane_helper_funcs); + else + drm_plane_helper_add(&plane->plane, + &rcar_du_plane_helper_funcs); drm_plane_create_alpha_property(&plane->plane); diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig index 89bdb598e0ae..7c1817240846 100644 --- a/drivers/gpu/drm/renesas/rz-du/Kconfig +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -4,6 +4,7 @@ config DRM_RZG2L_DU depends on ARCH_RZG2L || COMPILE_TEST depends on DRM && OF depends on VIDEO_RENESAS_VSP1 + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c index bc7c381f92ac..bbd7003335da 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -79,6 
+80,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops); static const struct drm_driver rzg2l_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rzg2l_du_dumb_create, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rzg2l_du_fops, .name = "rzg2l-du", .desc = "Renesas RZ/G2L Display Unit", @@ -160,7 +162,7 @@ static int rzg2l_du_probe(struct platform_device *pdev) drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev)); - drm_fbdev_dma_setup(&rcdu->ddev, 32); + drm_client_setup(&rcdu->ddev, NULL); return 0; diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig index c329ab8a7a8b..52e160464001 100644 --- a/drivers/gpu/drm/renesas/shmobile/Kconfig +++ b/drivers/gpu/drm/renesas/shmobile/Kconfig @@ -4,6 +4,7 @@ config DRM_SHMOBILE depends on DRM && PM depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c index ff2883c7fd46..8d3effe3f598 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c @@ -18,8 +18,10 @@ #include <linux/slab.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> @@ -101,6 +103,7 @@ DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops); static const struct drm_driver shmob_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &shmob_drm_fops, .name = "shmob-drm", .desc = "Renesas SH Mobile DRM", @@ -257,7 +260,7 @@ static int shmob_drm_probe(struct platform_device *pdev) if (ret < 0) goto err_modeset_cleanup; - drm_fbdev_dma_setup(ddev, 16); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c index 07ad17d24294..9d166ab2af8b 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c @@ -273,6 +273,13 @@ static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = { .atomic_disable = shmob_drm_plane_atomic_disable, }; +static const struct drm_plane_helper_funcs shmob_drm_primary_plane_helper_funcs = { + .atomic_check = shmob_drm_plane_atomic_check, + .atomic_update = shmob_drm_plane_atomic_update, + .atomic_disable = shmob_drm_plane_atomic_disable, + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, +}; + static const struct drm_plane_funcs shmob_drm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -310,7 +317,12 @@ struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev, splane->index = index; - drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&splane->base, + &shmob_drm_primary_plane_helper_funcs); + else + drm_plane_helper_add(&splane->base, + &shmob_drm_plane_helper_funcs); return &splane->base; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 23c49e91f1cc..3ac579615749 100644 --- a/drivers/gpu/drm/rockchip/Kconfig 
+++ b/drivers/gpu/drm/rockchip/Kconfig @@ -2,12 +2,14 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI @@ -63,6 +65,14 @@ config ROCKCHIP_DW_HDMI enable HDMI on RK3288 or RK3399 based SoC, you should select this option. +config ROCKCHIP_DW_HDMI_QP + bool "Rockchip specific extensions for Synopsys DW HDMI QP" + select DRM_BRIDGE_CONNECTOR + help + This selects support for Rockchip SoC specific extensions + for the Synopsys DesignWare HDMI QP driver. If you want to + enable HDMI on RK3588 based SoC, you should select this option. + config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" select GENERIC_PHY_MIPI_DPHY diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 3ff7b21c0414..3eab662a5a1d 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -11,6 +11,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_VOP) += rockchip_drm_vop.o rockchip_vop_reg.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o +rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h index 441248b7a79e..c7780ae3272a 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h @@ -77,7 +77,7 @@ #define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830 #define SOURCE_PIF_SW_RESET 0x30834 -/* bellow registers need access by mailbox */ +/* below registers need access by mailbox */ /* source car addr */ #define SOURCE_HDTX_CAR 0x0900 #define SOURCE_DPTX_CAR 0x0904 diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 240552eb517f..96e1097f993d 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -76,6 +76,7 @@ struct rockchip_hdmi { struct rockchip_encoder encoder; const struct rockchip_hdmi_chip_data *chip_data; const struct dw_hdmi_plat_data *plat_data; + struct clk *hdmiphy_clk; struct clk *ref_clk; struct clk *grf_clk; struct dw_hdmi *hdmi; @@ -91,74 +92,70 @@ static struct rockchip_hdmi *to_rockchip_hdmi(struct drm_encoder *encoder) static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { { - 27000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 30666000, { + { 0x00b3, 0x0000 }, + { 0x2153, 0x0000 }, + { 0x40f3, 0x0000 }, }, }, { - 36000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 36800000, { + { 0x00b3, 0x0000 }, + { 0x2153, 0x0000 }, + { 0x40a2, 0x0001 }, }, }, { - 40000000, { - { 0x00b3, 0x0000}, - { 0x2153, 0x0000}, - { 0x40f3, 0x0000} + 46000000, { + { 0x00b3, 0x0000 }, + { 0x2142, 0x0001 }, + { 0x40a2, 0x0001 }, }, }, { - 54000000, { - { 0x0072, 0x0001}, - { 0x2142, 0x0001}, - { 0x40a2, 
0x0001}, + 61333000, { + { 0x0072, 0x0001 }, + { 0x2142, 0x0001 }, + { 0x40a2, 0x0001 }, }, }, { - 65000000, { - { 0x0072, 0x0001}, - { 0x2142, 0x0001}, - { 0x40a2, 0x0001}, + 73600000, { + { 0x0072, 0x0001 }, + { 0x2142, 0x0001 }, + { 0x4061, 0x0002 }, }, }, { - 66000000, { - { 0x013e, 0x0003}, - { 0x217e, 0x0002}, - { 0x4061, 0x0002} + 92000000, { + { 0x0072, 0x0001 }, + { 0x2145, 0x0002 }, + { 0x4061, 0x0002 }, }, }, { - 74250000, { - { 0x0072, 0x0001}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 122666000, { + { 0x0051, 0x0002 }, + { 0x2145, 0x0002 }, + { 0x4061, 0x0002 }, }, }, { - 83500000, { - { 0x0072, 0x0001}, + 147200000, { + { 0x0051, 0x0002 }, + { 0x2145, 0x0002 }, + { 0x4064, 0x0003 }, }, }, { - 108000000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 184000000, { + { 0x0051, 0x0002 }, + { 0x214c, 0x0003 }, + { 0x4064, 0x0003 }, }, }, { - 106500000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} - }, - }, { - 146250000, { - { 0x0051, 0x0002}, - { 0x2145, 0x0002}, - { 0x4061, 0x0002} + 226666000, { + { 0x0040, 0x0003 }, + { 0x214c, 0x0003 }, + { 0x4064, 0x0003 }, }, }, { - 148500000, { - { 0x0051, 0x0003}, - { 0x214c, 0x0003}, - { 0x4064, 0x0003} + 272000000, { + { 0x0040, 0x0003 }, + { 0x214c, 0x0003 }, + { 0x5a64, 0x0003 }, }, }, { 340000000, { @@ -167,10 +164,16 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { { 0x5a64, 0x0003 }, }, }, { + 600000000, { + { 0x1a40, 0x0003 }, + { 0x3b4c, 0x0003 }, + { 0x5a64, 0x0003 }, + }, + }, { ~0UL, { - { 0x00a0, 0x000a }, - { 0x2001, 0x000f }, - { 0x4002, 0x000f }, + { 0x0000, 0x0000 }, + { 0x0000, 0x0000 }, + { 0x0000, 0x0000 }, }, } }; @@ -178,31 +181,18 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = { static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ { - 40000000, { 0x0018, 0x0018, 0x0018 }, - }, { - 65000000, { 0x0028, 0x0028, 0x0028 }, - }, { - 66000000, { 0x0038, 0x0038, 0x0038 }, - }, { - 74250000, { 0x0028, 0x0038, 0x0038 }, - }, { - 83500000, { 0x0028, 0x0038, 0x0038 }, - }, { - 146250000, { 0x0038, 0x0038, 0x0038 }, - }, { - 148500000, { 0x0000, 0x0038, 0x0038 }, - }, { 600000000, { 0x0000, 0x0000, 0x0000 }, }, { - ~0UL, { 0x0000, 0x0000, 0x0000}, + ~0UL, { 0x0000, 0x0000, 0x0000 }, } }; static const struct dw_hdmi_phy_config rockchip_phy_config[] = { /*pixelclk symbol term vlev*/ { 74250000, 0x8009, 0x0004, 0x0272}, - { 148500000, 0x802b, 0x0004, 0x028d}, + { 165000000, 0x802b, 0x0004, 0x0209}, { 297000000, 0x8039, 0x0005, 0x028d}, + { 594000000, 0x8039, 0x0000, 0x019d}, { ~0UL, 0x0000, 0x0000, 0x0000} }; @@ -251,10 +241,7 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data, const struct drm_display_mode *mode) { struct rockchip_hdmi *hdmi = data; - const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg; int pclk = mode->clock * 1000; - bool exact_match = hdmi->plat_data->phy_force_vendor; - int i; if (hdmi->chip_data->max_tmds_clock && mode->clock > hdmi->chip_data->max_tmds_clock) @@ -263,26 +250,18 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data, if (hdmi->ref_clk) { int rpclk = clk_round_rate(hdmi->ref_clk, pclk); - if (abs(rpclk - pclk) > pclk / 1000) + if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000) return MODE_NOCLOCK; } - for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) { - /* - * For vendor specific phys force an exact match of the pixelclock - * to preserve the original behaviour of the driver. 
- */ - if (exact_match && pclk == mpll_cfg[i].mpixelclock) - return MODE_OK; - /* - * The Synopsys phy can work with pixelclocks up to the value given - * in the corresponding mpll_cfg entry. - */ - if (!exact_match && pclk <= mpll_cfg[i].mpixelclock) - return MODE_OK; + if (hdmi->hdmiphy_clk) { + int rpclk = clk_round_rate(hdmi->hdmiphy_clk, pclk); + + if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000) + return MODE_NOCLOCK; } - return MODE_BAD; + return MODE_OK; } static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder) @@ -502,7 +481,7 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = { .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL), .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL), - .max_tmds_clock = 340000, + .max_tmds_clock = 594000, }; static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { @@ -516,7 +495,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { static struct rockchip_hdmi_chip_data rk3568_chip_data = { .lcdsel_grf_reg = -1, - .max_tmds_clock = 340000, + .max_tmds_clock = 594000, }; static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = { @@ -607,6 +586,15 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + if (hdmi->phy) { + struct of_phandle_args clkspec; + + clkspec.np = hdmi->phy->dev.of_node; + hdmi->hdmiphy_clk = of_clk_get_from_provider(&clkspec); + if (IS_ERR(hdmi->hdmiphy_clk)) + hdmi->hdmiphy_clk = NULL; + } + if (hdmi->chip_data == &rk3568_chip_data) { regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1, HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK | diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c new file mode 100644 index 000000000000..9c796ee4c303 --- /dev/null +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ * + * Author: Algea Cao <algea.cao@rock-chips.com> + * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/workqueue.h> + +#include <drm/bridge/dw_hdmi_qp.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "rockchip_drm_drv.h" + +#define RK3588_GRF_SOC_CON2 0x0308 +#define RK3588_HDMI0_HPD_INT_MSK BIT(13) +#define RK3588_HDMI0_HPD_INT_CLR BIT(12) +#define RK3588_GRF_SOC_CON7 0x031c +#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12) +#define RK3588_GRF_SOC_STATUS1 0x0384 +#define RK3588_HDMI0_LEVEL_INT BIT(16) +#define RK3588_GRF_VO1_CON3 0x000c +#define RK3588_SCLIN_MASK BIT(9) +#define RK3588_SDAIN_MASK BIT(10) +#define RK3588_MODE_MASK BIT(11) +#define RK3588_I2S_SEL_MASK BIT(13) +#define RK3588_GRF_VO1_CON9 0x0024 +#define RK3588_HDMI0_GRANT_SEL BIT(10) + +#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) +#define HOTPLUG_DEBOUNCE_MS 150 + +struct rockchip_hdmi_qp { + struct device *dev; + struct regmap *regmap; + struct regmap *vo_regmap; + struct rockchip_encoder encoder; + struct clk *ref_clk; + struct dw_hdmi_qp *hdmi; + struct phy *phy; + struct gpio_desc *enable_gpio; + struct delayed_work hpd_work; +}; + +static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder) +{ + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + return container_of(rkencoder, struct rockchip_hdmi_qp, encoder); +} + +static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder) +{ + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); + struct drm_crtc *crtc = encoder->crtc; + unsigned long long rate; + + /* Unconditionally switch to TMDS as FRL is not yet supported */ + gpiod_set_value(hdmi->enable_gpio, 1); + + if (crtc && crtc->state) { + rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, + 8, HDMI_COLORSPACE_RGB); + clk_set_rate(hdmi->ref_clk, rate); + /* + * FIXME: Temporary workaround to pass pixel clock rate + * to the PHY driver until phy_configure_opts_hdmi + * becomes available in the PHY API. 
See also the related + * comment in rk_hdptx_phy_power_on() from + * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c + */ + phy_set_bus_width(hdmi->phy, rate / 100); + } +} + +static int +dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_HDMIA; + + return 0; +} + +static const struct +drm_encoder_helper_funcs dw_hdmi_qp_rockchip_encoder_helper_funcs = { + .enable = dw_hdmi_qp_rockchip_encoder_enable, + .atomic_check = dw_hdmi_qp_rockchip_encoder_atomic_check, +}; + +static int dw_hdmi_qp_rk3588_phy_init(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + return phy_power_on(hdmi->phy); +} + +static void dw_hdmi_qp_rk3588_phy_disable(struct dw_hdmi_qp *dw_hdmi, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + phy_power_off(hdmi->phy); +} + +static enum drm_connector_status +dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + u32 val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val); + + return val & RK3588_HDMI0_LEVEL_INT ? + connector_status_connected : connector_status_disconnected; +} + +static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data) +{ + struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data; + + regmap_write(hdmi->regmap, + RK3588_GRF_SOC_CON2, + HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR | + RK3588_HDMI0_HPD_INT_MSK)); +} + +static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = { + .init = dw_hdmi_qp_rk3588_phy_init, + .disable = dw_hdmi_qp_rk3588_phy_disable, + .read_hpd = dw_hdmi_qp_rk3588_read_hpd, + .setup_hpd = dw_hdmi_qp_rk3588_setup_hpd, +}; + +static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work) +{ + struct rockchip_hdmi_qp *hdmi = container_of(work, + struct rockchip_hdmi_qp, + hpd_work.work); + struct drm_device *drm = hdmi->encoder.encoder.dev; + bool changed; + + if (drm) { + changed = drm_helper_hpd_irq_event(drm); + if (changed) + drm_dbg(hdmi, "connector status changed\n"); + } +} + +static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + + if (intr_stat) { + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, + RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + return IRQ_WAKE_THREAD; + } + + return IRQ_NONE; +} + +static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id) +{ + struct rockchip_hdmi_qp *hdmi = dev_id; + u32 intr_stat, val; + + regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat); + if (!intr_stat) + return IRQ_NONE; + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR, + RK3588_HDMI0_HPD_INT_CLR); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + mod_delayed_work(system_wq, &hdmi->hpd_work, + msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS)); + + val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + return IRQ_HANDLED; +} + +static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3588-dw-hdmi-qp", + .data = &rk3588_hdmi_phy_ops }, + {}, +}; 
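The GRF accesses above all go through the HIWORD_UPDATE() macro, which encodes Rockchip's hiword-mask register convention: the upper 16 bits of a 32-bit write select which of the lower 16 bits take effect, so individual bits can be set or cleared without a read-modify-write cycle and without locking against other GRF users. A minimal standalone sketch of that convention (the register model and bit position are illustrative only, not taken from the RK3588 TRM):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver macro: value in the low half, write-enable mask in the high half. */
#define HIWORD_UPDATE(val, mask)	((val) | ((uint32_t)(mask) << 16))

/* Model of how the hardware applies a hiword-masked write to a 16-bit register. */
static uint16_t grf_apply(uint16_t cur, uint32_t write)
{
	uint16_t mask = write >> 16;
	uint16_t val = write & 0xffff;

	/* Only bits whose write-enable bit is set may change. */
	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00ff;	/* arbitrary initial contents */

	/* Mask the HPD interrupt: set bit 13 and touch nothing else. */
	reg = grf_apply(reg, HIWORD_UPDATE(1u << 13, 1u << 13));
	printf("masked:   0x%04x\n", reg);	/* 0x20ff */

	/* Unmask it again: value 0 with the same write-enable bit. */
	reg = grf_apply(reg, HIWORD_UPDATE(0, 1u << 13));
	printf("unmasked: 0x%04x\n", reg);	/* 0x00ff */

	return 0;
}

This is also why dw_hdmi_qp_rk3588_hardirq() can mask RK3588_HDMI0_HPD_INT_MSK on its own and leave clearing and unmasking to the threaded handler: each write only touches the bits it explicitly enables.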
+MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids); + +static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, + void *data) +{ + static const char * const clk_names[] = { + "pclk", "earc", "aud", "hdp", "hclk_vo1", + "ref" /* keep "ref" last */ + }; + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_qp_plat_data plat_data; + struct drm_device *drm = data; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct rockchip_hdmi_qp *hdmi; + struct clk *clk; + int ret, irq, i; + u32 val; + + if (!pdev->dev.of_node) + return -ENODEV; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + plat_data.phy_ops = of_device_get_match_data(dev); + if (!plat_data.phy_ops) + return -ENODEV; + + plat_data.phy_data = hdmi; + hdmi->dev = &pdev->dev; + + encoder = &hdmi->encoder.encoder; + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); + + rockchip_drm_encoder_set_crtc_endpoint_id(&hdmi->encoder, + dev->of_node, 0, 0); + /* + * If we failed to find the CRTC(s) which this encoder is + * supposed to be connected to, it's because the CRTC has + * not been registered yet. Defer probing, and hope that + * the required CRTC is added later. + */ + if (encoder->possible_crtcs == 0) + return -EPROBE_DEFER; + + hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); + if (IS_ERR(hdmi->regmap)) { + drm_err(hdmi, "Unable to get rockchip,grf\n"); + return PTR_ERR(hdmi->regmap); + } + + hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,vo-grf"); + if (IS_ERR(hdmi->vo_regmap)) { + drm_err(hdmi, "Unable to get rockchip,vo-grf\n"); + return PTR_ERR(hdmi->vo_regmap); + } + + for (i = 0; i < ARRAY_SIZE(clk_names); i++) { + clk = devm_clk_get_enabled(hdmi->dev, clk_names[i]); + + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "Failed to get %s clock: %d\n", + clk_names[i], ret); + return ret; + } + } + hdmi->ref_clk = clk; + + hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", + GPIOD_OUT_HIGH); + if (IS_ERR(hdmi->enable_gpio)) { + ret = PTR_ERR(hdmi->enable_gpio); + drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret); + return ret; + } + + hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); + if (IS_ERR(hdmi->phy)) { + ret = PTR_ERR(hdmi->phy); + if (ret != -EPROBE_DEFER) + drm_err(hdmi, "failed to get phy: %d\n", ret); + return ret; + } + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); + + INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work); + + plat_data.main_irq = platform_get_irq_byname(pdev, "main"); + if (plat_data.main_irq < 0) + return plat_data.main_irq; + + irq = platform_get_irq_byname(pdev, "hpd"); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(hdmi->dev, irq, + dw_hdmi_qp_rk3588_hardirq, + 
dw_hdmi_qp_rk3588_irq, + IRQF_SHARED, "dw-hdmi-qp-hpd", + hdmi); + if (ret) + return ret; + + drm_encoder_helper_add(encoder, &dw_hdmi_qp_rockchip_encoder_helper_funcs); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); + + platform_set_drvdata(pdev, hdmi); + + hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data); + if (IS_ERR(hdmi->hdmi)) { + ret = PTR_ERR(hdmi->hdmi); + drm_encoder_cleanup(encoder); + return ret; + } + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + drm_err(hdmi, "failed to init bridge connector: %d\n", ret); + return ret; + } + + return drm_connector_attach_encoder(connector, encoder); +} + +static void dw_hdmi_qp_rockchip_unbind(struct device *dev, + struct device *master, + void *data) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&hdmi->hpd_work); + + drm_encoder_cleanup(&hdmi->encoder.encoder); +} + +static const struct component_ops dw_hdmi_qp_rockchip_ops = { + .bind = dw_hdmi_qp_rockchip_bind, + .unbind = dw_hdmi_qp_rockchip_unbind, +}; + +static int dw_hdmi_qp_rockchip_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops); +} + +static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev) +{ + struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev); + u32 val; + + val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) | + HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) | + HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) | + HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON3, val); + + val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, + RK3588_SET_HPD_PATH_MASK); + regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val); + + val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, + RK3588_HDMI0_GRANT_SEL); + regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val); + + dw_hdmi_qp_resume(dev, hdmi->hdmi); + + if (hdmi->encoder.encoder.dev) + drm_helper_hpd_irq_event(hdmi->encoder.encoder.dev); + + return 0; +} + +static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume) +}; + +struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = { + .probe = dw_hdmi_qp_rockchip_probe, + .remove = dw_hdmi_qp_rockchip_remove, + .driver = { + .name = "dwhdmiqp-rockchip", + .pm = &dw_hdmi_qp_rockchip_pm, + .of_match_table = dw_hdmi_qp_rockchip_dt_ids, + }, +}; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 11e5d10de4d7..585355de696b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -6,6 +6,7 @@ * based on exynos_drm_drv.c */ +#include <linux/aperture.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -16,7 +17,7 @@ #include <linux/console.h> #include <linux/iommu.h> -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -145,7 +146,7 @@ static int rockchip_drm_bind(struct device *dev) int ret; /* Remove existing drivers that may own the framebuffer memory. 
*/ - ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver); + ret = aperture_remove_all_conflicting_devices(rockchip_drm_driver.name); if (ret) { DRM_DEV_ERROR(dev, "Failed to remove existing framebuffers - %d.\n", @@ -195,7 +196,7 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_kms_helper_poll_fini; - drm_fbdev_dma_setup(drm_dev, 0); + drm_client_setup(drm_dev, NULL); return 0; err_kms_helper_poll_fini: @@ -230,6 +231,7 @@ static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = rockchip_gem_dumb_create, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -358,11 +360,34 @@ static void rockchip_drm_match_remove(struct device *dev) device_link_del(link); } +/* list of preferred vop devices */ +static const char *const rockchip_drm_match_preferred[] = { + "rockchip,rk3399-vop-big", + NULL, +}; + static struct component_match *rockchip_drm_match_add(struct device *dev) { struct component_match *match = NULL; + struct device_node *port; int i; + /* add preferred vop device match before adding driver device matches */ + for (i = 0; ; i++) { + port = of_parse_phandle(dev->of_node, "ports", i); + if (!port) + break; + + if (of_device_is_available(port->parent) && + of_device_compatible_match(port->parent, + rockchip_drm_match_preferred)) + drm_of_component_match_add(dev, &match, + component_compare_of, + port->parent); + + of_node_put(port); + } + for (i = 0; i < num_rockchip_sub_drivers; i++) { struct platform_driver *drv = rockchip_sub_drivers[i]; struct device *p = NULL, *d; @@ -507,6 +532,8 @@ static int __init rockchip_drm_init(void) ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP); ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI); + ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver, + CONFIG_ROCKCHIP_DW_HDMI_QP); ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI); ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 8d566fcd80a2..24b4ce5ceaf1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -88,6 +88,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder, int rockchip_drm_endpoint_is_subdriver(struct device_node *ep); extern struct platform_driver cdn_dp_driver; extern struct platform_driver dw_hdmi_rockchip_pltfm_driver; +extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver; extern struct platform_driver dw_mipi_dsi_rockchip_driver; extern struct platform_driver inno_hdmi_driver; extern struct platform_driver rockchip_dp_driver; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index a75eede8bf8d..69bcf0e99d57 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -51,7 +51,7 @@ * drm_sched_entity_set_priority(). For changing the set of schedulers * @sched_list at runtime see drm_sched_entity_modify_sched(). * - * An entity is cleaned up by callind drm_sched_entity_fini(). See also + * An entity is cleaned up by calling drm_sched_entity_fini(). See also * drm_sched_entity_destroy(). * * Returns 0 on success or a negative error code on failure. 
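The entity init/fini documentation above maps onto a short driver-side submission sequence; a condensed sketch of that flow under the API as modified by this series (struct my_job and the surrounding helpers are hypothetical, locking and most error handling elided):

#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

struct my_job {
	struct drm_sched_job base;	/* must embed the scheduler job */
};

static int my_queue_setup(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity)
{
	/* One entity per submission queue; it feeds jobs to @sched. */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

static int my_submit(struct drm_sched_entity *entity)
{
	struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
	int ret;

	if (!job)
		return -ENOMEM;

	/* Binds the job to the entity; with this series it also zeroes the embedded drm_sched_job. */
	ret = drm_sched_job_init(&job->base, entity, 1, NULL);
	if (ret) {
		kfree(job);
		return ret;
	}

	/* Arm and push under one common lock per entity, as documented above. */
	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

On teardown, drm_sched_entity_fini() (or drm_sched_entity_destroy()) stops the entity before the jobs' memory is reclaimed, which is the lifecycle the locking rework in the following hunks tightens.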
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, /* We start in an idle state. */ complete_all(&entity->entity_idle); - spin_lock_init(&entity->rq_lock); + spin_lock_init(&entity->lock); spsc_queue_init(&entity->job_queue); atomic_set(&entity->fence_seq, 0); @@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, { WARN_ON(!num_sched_list || !sched_list); - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->sched_list = sched_list; entity->num_sched_list = num_sched_list; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_modify_sched); @@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) if (!entity->rq) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); /* Make sure this entity is not used by the scheduler at the moment */ wait_for_completion(&entity->entity_idle); @@ -372,8 +372,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, } /* - * drm_sched_entity_clear_dep - callback to clear the entities dependency and - * wake up scheduler + * drm_sched_entity_wakeup - callback to clear the entity's dependency and + * wake up the scheduler */ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) @@ -391,14 +391,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, * @entity: scheduler entity * @priority: scheduler priority * - * Update the priority of runqueus used for the entity. + * Update the priority of runqueues used for the entity. */ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority) { - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->priority = priority; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_set_priority); @@ -514,8 +514,17 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) struct drm_sched_job *next; next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); - if (next) - drm_sched_rq_update_fifo(entity, next->submit_ts); + if (next) { + struct drm_sched_rq *rq; + + spin_lock(&entity->lock); + rq = entity->rq; + spin_lock(&rq->lock); + drm_sched_rq_update_fifo_locked(entity, rq, + next->submit_ts); + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); + } } /* Jobs and entities might have different lifecycles. Since we're @@ -555,14 +564,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) if (fence && !dma_fence_is_signaled(fence)) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); rq = sched ? sched->sched_rq[entity->priority] : NULL; if (rq != entity->rq) { drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; } - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); if (entity->num_sched_list == 1) entity->sched_list = NULL; @@ -576,8 +585,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) * fence sequence number this function should be called with drm_sched_job_arm() * under common lock for the struct drm_sched_entity that was set up for * @sched_job in drm_sched_job_init(). - * - * Returns 0 for success, negative error code otherwise. 
*/ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) { @@ -603,9 +610,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) struct drm_sched_rq *rq; /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); if (entity->stopped) { - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); DRM_ERROR("Trying to push to a killed entity\n"); return; @@ -614,11 +621,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) rq = entity->rq; sched = rq->sched; + spin_lock(&rq->lock); drm_sched_rq_add_entity(rq, entity); - spin_unlock(&entity->rq_lock); if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_update_fifo(entity, submit_ts); + drm_sched_rq_update_fifo_locked(entity, rq, submit_ts); + + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); drm_sched_wakeup(sched); } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index e97c6c60bc96..7ce25281c74c 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -41,7 +41,7 @@ * 4. Entities themselves maintain a queue of jobs that will be scheduled on * the hardware. * - * The jobs in a entity are always scheduled in the order that they were pushed. + * The jobs in an entity are always scheduled in the order in which they were pushed. * * Note that once a job was taken from the entities queue and pushed to the * hardware, i.e. the pending queue, the entity must not be referenced anymore @@ -159,35 +159,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting); } -static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity) +static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) { - struct drm_sched_rq *rq = entity->rq; - if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); RB_CLEAR_NODE(&entity->rb_tree_node); } } -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts) +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + ktime_t ts) { /* * Both locks need to be grabbed, one to protect from entity->rq change * for entity from within concurrent drm_sched_entity_select_rq and the * other to update the rb tree structure. 
- spin_lock(&entity->rq_lock); - spin_lock(&entity->rq->lock); + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); entity->oldest_job_waiting = ts; - rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root, + rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, drm_sched_entity_compare_before); - - spin_unlock(&entity->rq->lock); - spin_unlock(&entity->rq_lock); } /** @@ -219,15 +217,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, void drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); + if (!list_empty(&entity->list)) return; - spin_lock(&rq->lock); - atomic_inc(rq->sched->score); list_add_tail(&entity->list, &rq->entities); - - spin_unlock(&rq->lock); } /** @@ -241,6 +238,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + if (list_empty(&entity->list)) return; @@ -253,7 +252,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, rq->current_entity = NULL; if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); spin_unlock(&rq->lock); } @@ -355,7 +354,6 @@ drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, return ERR_PTR(-ENOSPC); } - rq->current_entity = entity; reinit_completion(&entity->entity_idle); break; } @@ -601,6 +599,9 @@ static void drm_sched_job_timedout(struct work_struct *work) * caller's responsibility to release it manually if it's not part of the * pending list any more. * + * This function is typically used for reset recovery (see the documentation of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler teardown, i.e., before calling drm_sched_fini(). */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { @@ -673,16 +674,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) */ cancel_delayed_work(&sched->work_tdr); } - EXPORT_SYMBOL(drm_sched_stop); /** * drm_sched_start - recover jobs after a reset * * @sched: scheduler instance + * @errno: error to set on the pending fences * + * This function is typically used for reset recovery (see the documentation of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler startup. The scheduler itself is fully operational after + * drm_sched_init() succeeded. */ -void drm_sched_start(struct drm_gpu_scheduler *sched) +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { struct drm_sched_job *s_job, *tmp; @@ -697,13 +702,13 @@ void drm_sched_start(struct drm_gpu_scheduler *sched) atomic_add(s_job->credits, &sched->credit_count); if (!fence) { - drm_sched_job_done(s_job, -ECANCELED); + drm_sched_job_done(s_job, errno ?: -ECANCELED); continue; } if (dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb)) - drm_sched_job_done(s_job, fence->error); + drm_sched_job_done(s_job, fence->error ?: errno); } drm_sched_start_timeout_unlocked(sched); @@ -778,6 +783,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * Drivers must make sure to call drm_sched_job_cleanup() if this function returns * successfully, even when @job is aborted before drm_sched_job_arm() is called. 
* + * Note that this function does not assign a valid value to each struct member + * of struct drm_sched_job. Take a look at that struct's documentation to see + * who sets which struct member with what lifetime. + * * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware * has died, which can mean that there's no valid runqueue for an @entity. * This function returns -ENOENT in this case (which probably should be -EIO as @@ -803,6 +812,14 @@ int drm_sched_job_init(struct drm_sched_job *job, return -EINVAL; } + /* + * We don't know for sure how the user has allocated the struct. Thus, zero the + * struct so that disallowed (i.e., too early) usage of pointers that + * this function does not set is guaranteed to lead to a NULL pointer + * exception instead of UB. + */ + memset(job, 0, sizeof(*job)); + job->entity = entity; job->credits = credits; job->s_fence = drm_sched_fence_alloc(entity, owner); @@ -1333,6 +1350,19 @@ EXPORT_SYMBOL(drm_sched_init); * @sched: scheduler instance * * Tears down and cleans up the scheduler. + * + * This stops submission of new jobs to the hardware through + * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job() + * will not be called for jobs still in drm_gpu_scheduler.pending_list. + * There is no solution for this currently. Thus, it is up to the driver to make + * sure that + * a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job() + * has been called for all submitted jobs, or that + * b) the jobs for which drm_sched_backend_ops.free_job() has not been called + * after drm_sched_fini() ran are freed manually. + * + * FIXME: Take care of the above problem and prevent this function from leaking + * the jobs in drm_gpu_scheduler.pending_list under any circumstances. */ void drm_sched_fini(struct drm_gpu_scheduler *sched) { @@ -1348,7 +1378,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) list_for_each_entry(s_entity, &rq->entities, list) /* * Prevents reinsertion and marks job_queue as idle, - * it will removed from rq in drm_sched_entity_fini + * it will be removed from the rq in drm_sched_entity_fini() * eventually */ s_entity->stopped = true; @@ -1428,8 +1458,10 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready); /** * drm_sched_wqueue_stop - stop scheduler submission - * * @sched: scheduler instance + * + * Stops the scheduler from pulling new jobs from entities. It also stops + * freeing jobs automatically through drm_sched_backend_ops.free_job(). */ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) { @@ -1441,8 +1473,12 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop); /** * drm_sched_wqueue_start - start scheduler submission - * * @sched: scheduler instance + * + * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it. + * + * This function is not necessary for 'conventional' startup. The scheduler is + * fully operational after drm_sched_init() succeeded. 
*/ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) { diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig index c3ee956c2bb9..400a6cab3a67 100644 --- a/drivers/gpu/drm/solomon/Kconfig +++ b/drivers/gpu/drm/solomon/Kconfig @@ -2,6 +2,7 @@ config DRM_SSD130X tristate "DRM support for Solomon SSD13xx OLED displays" depends on DRM && MMU select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 6f51bcf774e2..29b2f82d81f8 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> @@ -1780,6 +1781,7 @@ DEFINE_DRM_GEM_FOPS(ssd130x_fops); static const struct drm_driver ssd130x_drm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -2029,7 +2031,7 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap) if (ret) return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n")); - drm_fbdev_shmem_setup(drm, 32); + drm_client_setup(drm, NULL); return ssd130x; } diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 0b69c140eab3..44a7a579660f 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -209,7 +209,7 @@ static int regmap_tst_io_read(void *context, u32 reg, u32 *val) return 0; } -static struct regmap_bus regmap_tst_io = { +static const struct regmap_bus regmap_tst_io = { .reg_write = regmap_tst_io_write, .reg_read = regmap_tst_io_read, }; diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 75c301aadcbc..ec341a4720d4 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -3,6 +3,7 @@ config DRM_STI tristate "DRM Support for STMicroelectronics SoC stiH4xx Series" depends on OF && DRM && (ARCH_STI || COMPILE_TEST) select RESET_CONTROLLER + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index db0a1eb53532..c59fcb4dca32 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -200,6 +200,9 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 1799c12babf5..65f180c8e8e2 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -136,6 +137,7 @@ static const struct drm_driver sti_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &sti_driver_fops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = sti_drm_dbg_init, @@ -203,7 +205,7 @@ static int sti_bind(struct device *dev) drm_mode_config_reset(ddev); - drm_fbdev_dma_setup(ddev, 32); + 
drm_client_setup(ddev, NULL); return 0; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 43c72c2604a0..f046f5f7ad25 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -638,6 +638,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, mixer = to_sti_mixer(crtc); crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index acbf70b95aeb..5793cf2cb897 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1037,6 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index d7f41a87808e..635be0ac00af 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -3,6 +3,7 @@ config DRM_STM tristate "DRM Support for STMicroelectronics SoC Series" depends on DRM && (ARCH_STM32 || COMPILE_TEST) depends on COMMON_CLK + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index e1232f74dfa5..478dc129d5c2 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -8,6 +8,7 @@ * Mickael Reulier <mickael.reulier@st.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/mod_devicetable.h> @@ -15,11 +16,12 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -66,6 +68,7 @@ static const struct drm_driver drv_driver = { .patchlevel = 0, .fops = &drv_driver_fops, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static int drv_load(struct drm_device *ddev) @@ -188,7 +191,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) DRM_DEBUG("%s\n", __func__); - ret = drm_aperture_remove_framebuffers(&drv_driver); + ret = aperture_remove_all_conflicting_devices(drv_driver.name); if (ret) return ret; @@ -206,7 +209,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(ddev, 16); + drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 4037e085430e..b56ba00aabca 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -3,6 +3,7 @@ config DRM_SUN4I tristate "DRM Support for Allwinner A10 Display Engine" depends on DRM && COMMON_CLK depends on ARCH_SUNXI || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 35d7a7ffd208..3f880d8a5666 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ 
b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -6,6 +6,7 @@ * Maxime Ripard <maxime.ripard@free-electrons.com> */ +#include <linux/aperture.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/kfifo.h> @@ -14,8 +15,8 @@ #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> @@ -55,6 +56,7 @@ static const struct drm_driver sun4i_drv_driver = { /* GEM Operations */ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, }; static int sun4i_drv_bind(struct device *dev) @@ -98,7 +100,7 @@ static int sun4i_drv_bind(struct device *dev) goto unbind_all; /* Remove early framebuffers (ie. simplefb) */ - ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver); + ret = aperture_remove_all_conflicting_devices(sun4i_drv_driver.name); if (ret) goto unbind_all; @@ -111,7 +113,7 @@ static int sun4i_drv_bind(struct device *dev) if (ret) goto finish_poll; - drm_fbdev_dma_setup(drm, 32); + drm_client_setup(drm, NULL); dev_set_drvdata(dev, drm); diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index e688d8104652..8a3b16aac5d6 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -5,6 +5,7 @@ config DRM_TEGRA depends on COMMON_CLK depends on DRM depends on OF + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 34d22ba210b0..bf3421667ecc 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -4,6 +4,7 @@ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. */ +#include <linux/aperture.h> #include <linux/bitops.h> #include <linux/host1x.h> #include <linux/idr.h> @@ -12,9 +13,9 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fourcc.h> @@ -892,6 +893,8 @@ static const struct drm_driver tegra_drm_driver = { .dumb_create = tegra_bo_dumb_create, + TEGRA_FBDEV_DRIVER_OPS, + .ioctls = tegra_drm_ioctls, .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), .fops = &tegra_drm_fops, @@ -1255,7 +1258,7 @@ static int host1x_drm_probe(struct host1x_device *dev) * will not expose any modesetting features. */ if (drm->mode_config.num_crtc > 0) { - err = drm_aperture_remove_framebuffers(&tegra_drm_driver); + err = aperture_remove_all_conflicting_devices(tegra_drm_driver.name); if (err < 0) goto hub; } else { @@ -1270,7 +1273,7 @@ static int host1x_drm_probe(struct host1x_device *dev) if (err < 0) goto hub; - tegra_fbdev_setup(drm); + drm_client_setup(drm, NULL); return 0; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 2f3781e04b0a..0b65e69f3a8a 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -25,6 +25,9 @@ /* XXX move to include/uapi/drm/drm_fourcc.h? 
*/ #define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22) +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + struct edid; struct reset_control; @@ -190,10 +193,13 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, const struct drm_mode_fb_cmd2 *cmd); #ifdef CONFIG_DRM_FBDEV_EMULATION -void tegra_fbdev_setup(struct drm_device *drm); +int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +#define TEGRA_FBDEV_DRIVER_OPS \ + .fbdev_probe = tegra_fbdev_driver_fbdev_probe #else -static inline void tegra_fbdev_setup(struct drm_device *drm) -{ } +#define TEGRA_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL #endif extern struct platform_driver tegra_display_hub_driver; diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c index db6eaac3d30e..cd9d798f8870 100644 --- a/drivers/gpu/drm/tegra/fbdev.c +++ b/drivers/gpu/drm/tegra/fbdev.c @@ -66,8 +66,11 @@ static const struct fb_ops tegra_fb_ops = { .fb_destroy = tegra_fbdev_fb_destroy, }; -static int tegra_fbdev_probe(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +static const struct drm_fb_helper_funcs tegra_fbdev_helper_funcs = { +}; + +int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) { struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; @@ -112,6 +115,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, return PTR_ERR(fb); } + helper->funcs = &tegra_fbdev_helper_funcs; helper->fb = fb; helper->info = info; @@ -144,93 +148,3 @@ destroy: drm_framebuffer_remove(fb); return err; } - -static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = { - .fb_probe = tegra_fbdev_probe, -}; - -/* - * struct drm_client - */ - -static void tegra_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->info) { - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int tegra_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int tegra_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs tegra_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = tegra_fbdev_client_unregister, - .restore = tegra_fbdev_client_restore, - .hotplug = tegra_fbdev_client_hotplug, -}; - -void tegra_fbdev_setup(struct drm_device *dev) -{ - struct drm_fb_helper *helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - helper = kzalloc(sizeof(*helper), GFP_KERNEL); - if (!helper) - return; - drm_fb_helper_prepare(dev, 
helper, 32, &tegra_fb_helper_funcs); - - ret = drm_client_init(dev, &helper->client, "fbdev", &tegra_fbdev_client_funcs); - if (ret) - goto err_drm_client_init; - - drm_client_register(&helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(helper); - kfree(helper); -} diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index b4eb030ea961..d275404ad0e9 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -76,8 +76,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_ /* * Imported buffers need special treatment to satisfy the semantics of DMA-BUF. */ - if (gem->import_attach) { - struct dma_buf *buf = gem->import_attach->dmabuf; + if (obj->dma_buf) { + struct dma_buf *buf = obj->dma_buf; map->attach = dma_buf_attach(buf, dev); if (IS_ERR(map->attach)) { @@ -184,8 +184,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) if (obj->vaddr) return obj->vaddr; - if (obj->gem.import_attach) { - ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) { + ret = dma_buf_vmap_unlocked(obj->dma_buf, &map); if (ret < 0) return ERR_PTR(ret); @@ -208,8 +208,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) if (obj->vaddr) return; - if (obj->gem.import_attach) - return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) + return dma_buf_vunmap_unlocked(obj->dma_buf, &map); vunmap(addr); } @@ -465,27 +465,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, if (IS_ERR(bo)) return bo; - attach = dma_buf_attach(buf, drm->dev); - if (IS_ERR(attach)) { - err = PTR_ERR(attach); - goto free; - } - - get_dma_buf(buf); + /* + * If we need to use IOMMU API to map the dma-buf into the internally managed + * domain, map it first to the DRM device to get an sgt. 
+ */ + if (tegra->domain) { + attach = dma_buf_attach(buf, drm->dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto free; + } - bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); - if (IS_ERR(bo->sgt)) { - err = PTR_ERR(bo->sgt); - goto detach; - } + bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); + if (IS_ERR(bo->sgt)) { + err = PTR_ERR(bo->sgt); + goto detach; + } - if (tegra->domain) { err = tegra_bo_iommu_map(tegra, bo); if (err < 0) goto detach; + + bo->gem.import_attach = attach; } - bo->gem.import_attach = attach; + get_dma_buf(buf); + bo->dma_buf = buf; return bo; @@ -516,17 +521,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem) dev_name(mapping->dev)); } - if (tegra->domain) + if (tegra->domain) { tegra_bo_iommu_unmap(tegra, bo); - if (gem->import_attach) { - dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, - DMA_TO_DEVICE); - drm_prime_gem_destroy(gem, NULL); - } else { - tegra_bo_free(gem->dev, bo); + if (gem->import_attach) { + dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, + DMA_TO_DEVICE); + dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach); + } } + tegra_bo_free(gem->dev, bo); + + if (bo->dma_buf) + dma_buf_put(bo->dma_buf); + drm_gem_object_release(gem); kfree(bo); } diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index cb5146a67668..bf2cbd48eb3f 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -32,6 +32,26 @@ struct tegra_bo_tiling { enum tegra_bo_sector_layout sector_layout; }; +/* + * How memory is referenced within a tegra_bo: + * + * Buffer source | Mapping API(*) | Fields + * ---------------+-----------------+--------------- + * Allocated here | DMA API | iova (IOVA mapped to drm->dev), vaddr (CPU VA) + * + * Allocated here | IOMMU API | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev), + * | iova/size (Mapped to domain) + * + * Imported | DMA API | dma_buf (Imported dma_buf) + * + * Imported | IOMMU API | dma_buf (Imported dma_buf), + * | gem->import_attach (Attachment on drm->dev), + * | sgt (Mapped to drm->dev) + * | iova/size (Mapped to domain) + * + * (*) If tegra->domain is set, i.e. TegraDRM IOMMU domain is directly managed through IOMMU API, + * this is IOMMU API. Otherwise DMA API. 
+ */ struct tegra_bo { struct drm_gem_object gem; struct host1x_bo base; @@ -39,6 +59,7 @@ struct tegra_bo { struct sg_table *sgt; dma_addr_t iova; void *vaddr; + struct dma_buf *dma_buf; struct drm_mm_node *mm; unsigned long num_pages; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 00c8564520e7..caee824832b3 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,6 +46,7 @@ struct gr3d { unsigned int nclocks; struct reset_control_bulk_data resets[RST_GR3D_MAX]; unsigned int nresets; + struct dev_pm_domain_list *pd_list; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -369,18 +370,13 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, return 0; } -static void gr3d_del_link(void *link) -{ - device_link_del(link); -} - static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) { - static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; - const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; - struct device **opp_virt_devs, *pd_dev; - struct device_link *link; - unsigned int i; + struct dev_pm_domain_attach_data pd_data = { + .pd_names = (const char *[]) { "3d0", "3d1" }, + .num_pd_names = 2, + .pd_flags = PD_FLAG_REQUIRED_OPP, + }; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -414,29 +410,10 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) if (dev->pm_domain) return 0; - err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); - if (err) + err = devm_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list); + if (err < 0) return err; - for (i = 0; opp_genpd_names[i]; i++) { - pd_dev = opp_virt_devs[i]; - if (!pd_dev) { - dev_err(dev, "failed to get %s power domain\n", - opp_genpd_names[i]); - return -EINVAL; - } - - link = device_link_add(dev, pd_dev, link_flags); - if (!link) { - dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); - return -EINVAL; - } - - err = devm_add_action_or_reset(dev, gr3d_del_link, link); - if (err) - return err; - } - return 0; } diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 09987e372e3e..6bf2dae82ca0 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -434,7 +434,7 @@ tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock, static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) { - const unsigned int freqs[] = { + static const unsigned int freqs[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 }; unsigned int i; diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c index 06f03b78c9c4..6ea04cc8f324 100644 --- a/drivers/gpu/drm/tests/drm_framebuffer_test.c +++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c @@ -5,11 +5,15 @@ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net> */ +#include <kunit/device.h> #include <kunit/test.h> #include <drm/drm_device.h> +#include <drm/drm_drv.h> #include <drm/drm_mode.h> +#include <drm/drm_framebuffer.h> #include <drm/drm_fourcc.h> +#include <drm/drm_kunit_helpers.h> #include <drm/drm_print.h> #include "../drm_crtc_internal.h" @@ -19,6 +23,8 @@ #define MIN_HEIGHT 4 #define MAX_HEIGHT 4096 +#define DRM_MODE_FB_INVALID BIT(2) + struct drm_framebuffer_test { int buffer_created; struct drm_mode_fb_cmd2 cmd; @@ -83,6 +89,24 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { .pitches = { 4 * MAX_WIDTH, 0, 0 }, } }, + +/* + * All entries in members that represent per-plane values (@modifier, @handles, + * @pitches and @offsets) must be zero when unused. + */ +{ .buffer_created = 0, .name = "ABGR8888 Buffer offset for inexistent plane", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, + .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, UINT_MAX / 2, 0 }, + .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS, + } +}, + +{ .buffer_created = 0, .name = "ABGR8888 Invalid flag", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, + .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, + .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_INVALID, + } +}, { .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers", .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, @@ -262,6 +286,13 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) }, } }, +{ .buffer_created = 0, .name = "YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)", + .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YUV420_10BIT, + .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS, + .modifier = { DRM_FORMAT_MOD_LINEAR, 0, 0 }, + .pitches = { MAX_WIDTH, 0, 0 }, + } +}, { .buffer_created = 1, .name = "X0L2 Normal sizes", .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 } @@ -317,12 +348,26 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { }, }; +/* + * This struct is intended to provide a way for mocked functions to communicate + * with the outer test when that can't be achieved through their return values. In + * this way, the functions that receive the mocked drm_device, for example, can + * grab a reference to this and actually return something to be used in some + * expectation. 
+ */ +struct drm_framebuffer_test_priv { + struct drm_device dev; + bool buffer_created; + bool buffer_freed; +}; + static struct drm_framebuffer *fb_create_mock(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - int *buffer_created = dev->dev_private; - *buffer_created = 1; + struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev); + + priv->buffer_created = true; return ERR_PTR(-EINVAL); } @@ -332,42 +377,338 @@ static struct drm_mode_config_funcs mock_config_funcs = { static int drm_framebuffer_test_init(struct kunit *test) { - struct drm_device *mock; + struct device *parent; + struct drm_framebuffer_test_priv *priv; + struct drm_device *dev; + + parent = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); - mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock); + priv = drm_kunit_helper_alloc_drm_device(test, parent, typeof(*priv), + dev, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + dev = &priv->dev; - mock->mode_config.min_width = MIN_WIDTH; - mock->mode_config.max_width = MAX_WIDTH; - mock->mode_config.min_height = MIN_HEIGHT; - mock->mode_config.max_height = MAX_HEIGHT; - mock->mode_config.funcs = &mock_config_funcs; + dev->mode_config.min_width = MIN_WIDTH; + dev->mode_config.max_width = MAX_WIDTH; + dev->mode_config.min_height = MIN_HEIGHT; + dev->mode_config.max_height = MAX_HEIGHT; + dev->mode_config.funcs = &mock_config_funcs; - test->priv = mock; + test->priv = priv; return 0; } static void drm_test_framebuffer_create(struct kunit *test) { const struct drm_framebuffer_test *params = test->param_value; - struct drm_device *mock = test->priv; - int buffer_created = 0; + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; - mock->dev_private = &buffer_created; - drm_internal_framebuffer_create(mock, ¶ms->cmd, NULL); - KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created); + priv->buffer_created = false; + drm_internal_framebuffer_create(dev, ¶ms->cmd, NULL); + KUNIT_EXPECT_EQ(test, params->buffer_created, priv->buffer_created); } static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc) { - strcpy(desc, t->name); + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); } KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases, drm_framebuffer_test_to_desc); +/* Tries to create a framebuffer with modifiers without drm_device supporting it */ +static void drm_test_framebuffer_modifiers_not_supported(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer *fb; + + /* A valid cmd with modifier */ + struct drm_mode_fb_cmd2 cmd = { + .width = MAX_WIDTH, .height = MAX_HEIGHT, + .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 }, + .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 }, + .flags = DRM_MODE_FB_MODIFIERS, + }; + + priv->buffer_created = false; + dev->mode_config.fb_modifiers_not_supported = 1; + + fb = drm_internal_framebuffer_create(dev, &cmd, NULL); + KUNIT_EXPECT_EQ(test, priv->buffer_created, false); + KUNIT_EXPECT_EQ(test, PTR_ERR(fb), -EINVAL); +} + +/* Parameters for testing drm_framebuffer_check_src_coords function */ +struct drm_framebuffer_check_src_coords_case { + const char *name; + const int expect; + const unsigned int fb_size; + const uint32_t src_x; + const uint32_t src_y; + + /* Deltas to be applied on source */ + 
const uint32_t dsrc_w; + const uint32_t dsrc_h; +}; + +static const struct drm_framebuffer_check_src_coords_case +drm_framebuffer_check_src_coords_cases[] = { + { .name = "Success: source fits into fb", + .expect = 0, + }, + { .name = "Fail: overflowing fb with x-axis coordinate", + .expect = -ENOSPC, .src_x = 1, .fb_size = UINT_MAX, + }, + { .name = "Fail: overflowing fb with y-axis coordinate", + .expect = -ENOSPC, .src_y = 1, .fb_size = UINT_MAX, + }, + { .name = "Fail: overflowing fb with source width", + .expect = -ENOSPC, .dsrc_w = 1, .fb_size = UINT_MAX - 1, + }, + { .name = "Fail: overflowing fb with source height", + .expect = -ENOSPC, .dsrc_h = 1, .fb_size = UINT_MAX - 1, + }, +}; + +static void drm_test_framebuffer_check_src_coords(struct kunit *test) +{ + const struct drm_framebuffer_check_src_coords_case *params = test->param_value; + const uint32_t src_x = params->src_x; + const uint32_t src_y = params->src_y; + const uint32_t src_w = (params->fb_size << 16) + params->dsrc_w; + const uint32_t src_h = (params->fb_size << 16) + params->dsrc_h; + const struct drm_framebuffer fb = { + .width = params->fb_size, + .height = params->fb_size + }; + int ret; + + ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, &fb); + KUNIT_EXPECT_EQ(test, ret, params->expect); +} + +static void +check_src_coords_test_to_desc(const struct drm_framebuffer_check_src_coords_case *t, + char *desc) +{ + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(check_src_coords, drm_framebuffer_check_src_coords_cases, + check_src_coords_test_to_desc); + +/* + * Test if drm_framebuffer_cleanup() really removes the framebuffer object + * from the device's fb_list and decrements the number of framebuffers for that + * device, which is all it does. + */ +static void drm_test_framebuffer_cleanup(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct list_head *fb_list = &dev->mode_config.fb_list; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = dev, .format = &format }; + struct drm_framebuffer fb2 = { .dev = dev, .format = &format }; + + /* This will result in [fb_list] -> fb2 -> fb1 */ + drm_framebuffer_init(dev, &fb1, NULL); + drm_framebuffer_init(dev, &fb2, NULL); + + drm_framebuffer_cleanup(&fb1); + + /* Now fb2 is the only element on fb_list */ + KUNIT_ASSERT_TRUE(test, list_is_singular(&fb2.head)); + KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 1); + + drm_framebuffer_cleanup(&fb2); + + /* Now fb_list is empty */ + KUNIT_ASSERT_TRUE(test, list_empty(fb_list)); + KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 0); +} + +/* + * Initialize a framebuffer, look up its id and test if the returned reference + * matches. 
+ */ +static void drm_test_framebuffer_lookup(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_format_info format = { }; + struct drm_framebuffer expected_fb = { .dev = dev, .format = &format }; + struct drm_framebuffer *returned_fb; + uint32_t id = 0; + int ret; + + ret = drm_framebuffer_init(dev, &expected_fb, NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + id = expected_fb.base.id; + + /* Looking for expected_fb */ + returned_fb = drm_framebuffer_lookup(dev, NULL, id); + KUNIT_EXPECT_PTR_EQ(test, returned_fb, &expected_fb); + drm_framebuffer_put(returned_fb); + + drm_framebuffer_cleanup(&expected_fb); +} + +/* Try to look up an ID that is not linked to a framebuffer */ +static void drm_test_framebuffer_lookup_inexistent(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer *fb; + uint32_t id = 0; + + /* Looking for a nonexistent framebuffer */ + fb = drm_framebuffer_lookup(dev, NULL, id); + KUNIT_EXPECT_NULL(test, fb); +} + +/* + * Test if drm_framebuffer_init() initializes the framebuffer successfully, + * asserting that its modeset object struct and its refcount are correctly + * set and that exactly one framebuffer is initialized. + */ +static void drm_test_framebuffer_init(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = dev, .format = &format }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + ret = drm_framebuffer_init(dev, &fb1, &funcs); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* Check if fb->funcs is actually set to the drm_framebuffer_funcs passed in */ + KUNIT_EXPECT_PTR_EQ(test, fb1.funcs, &funcs); + + /* The fb->comm must be set to the name of the currently running process */ + KUNIT_EXPECT_STREQ(test, fb1.comm, current->comm); + + /* The fb->base must be successfully initialized */ + KUNIT_EXPECT_NE(test, fb1.base.id, 0); + KUNIT_EXPECT_EQ(test, fb1.base.type, DRM_MODE_OBJECT_FB); + KUNIT_EXPECT_EQ(test, kref_read(&fb1.base.refcount), 1); + KUNIT_EXPECT_PTR_EQ(test, fb1.base.free_cb, &drm_framebuffer_free); + + /* There must be just that one fb initialized */ + KUNIT_EXPECT_EQ(test, dev->mode_config.num_fb, 1); + KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.prev, &fb1.head); + KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.next, &fb1.head); + + drm_framebuffer_cleanup(&fb1); +} + +/* Try to init a framebuffer without setting its format */ +static void drm_test_framebuffer_init_bad_format(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_framebuffer fb1 = { .dev = dev, .format = NULL }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + /* Fails if fb.format isn't set */ + ret = drm_framebuffer_init(dev, &fb1, &funcs); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); +} + +/* + * Test calling drm_framebuffer_init() passing a framebuffer linked to a + * different drm_device parent from the one passed as the first argument, which + * must fail.
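+ * The mismatch is expected to be detected early and rejected with -EINVAL, + * before the framebuffer is added to either device's fb_list.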
+ */ +static void drm_test_framebuffer_init_dev_mismatch(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *right_dev = &priv->dev; + struct drm_device *wrong_dev; + struct device *wrong_dev_parent; + struct drm_format_info format = { }; + struct drm_framebuffer fb1 = { .dev = right_dev, .format = &format }; + struct drm_framebuffer_funcs funcs = { }; + int ret; + + wrong_dev_parent = kunit_device_register(test, "drm-kunit-wrong-device-mock"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev_parent); + + wrong_dev = __drm_kunit_helper_alloc_drm_device(test, wrong_dev_parent, + sizeof(struct drm_device), + 0, 0); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev); + + /* Fails if fb->dev doesn't point to the drm_device passed as first arg */ + ret = drm_framebuffer_init(wrong_dev, &fb1, &funcs); + KUNIT_EXPECT_EQ(test, ret, -EINVAL); +} + +static void destroy_free_mock(struct drm_framebuffer *fb) +{ + struct drm_framebuffer_test_priv *priv = container_of(fb->dev, typeof(*priv), dev); + + priv->buffer_freed = true; +} + +static struct drm_framebuffer_funcs framebuffer_funcs_free_mock = { + .destroy = destroy_free_mock, +}; + +/* + * In summary, the drm_framebuffer_free() function must implicitly call + * fb->funcs->destroy() and guarantee that the framebuffer object is + * unregistered from the drm_device IDR pool. + */ +static void drm_test_framebuffer_free(struct kunit *test) +{ + struct drm_framebuffer_test_priv *priv = test->priv; + struct drm_device *dev = &priv->dev; + struct drm_mode_object *obj; + struct drm_framebuffer fb = { + .dev = dev, + .funcs = &framebuffer_funcs_free_mock, + }; + int id, ret; + + priv->buffer_freed = false; + + /* + * Mock a framebuffer that was not unregistered at the moment of the + * drm_framebuffer_free() call.
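+ * It therefore still owns an entry in the device's object IDR, which + * drm_framebuffer_free() itself is expected to release.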
+ */ + ret = drm_mode_object_add(dev, &fb.base, DRM_MODE_OBJECT_FB); + KUNIT_ASSERT_EQ(test, ret, 0); + id = fb.base.id; + + drm_framebuffer_free(&fb.base.refcount); + + /* The framebuffer object must be unregistered */ + obj = drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_FB); + KUNIT_EXPECT_PTR_EQ(test, obj, NULL); + KUNIT_EXPECT_EQ(test, fb.base.id, 0); + + /* Test if fb->funcs->destroy() was called */ + KUNIT_EXPECT_EQ(test, priv->buffer_freed, true); +} + static struct kunit_case drm_framebuffer_tests[] = { + KUNIT_CASE_PARAM(drm_test_framebuffer_check_src_coords, check_src_coords_gen_params), + KUNIT_CASE(drm_test_framebuffer_cleanup), KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params), + KUNIT_CASE(drm_test_framebuffer_free), + KUNIT_CASE(drm_test_framebuffer_init), + KUNIT_CASE(drm_test_framebuffer_init_bad_format), + KUNIT_CASE(drm_test_framebuffer_init_dev_mismatch), + KUNIT_CASE(drm_test_framebuffer_lookup), + KUNIT_CASE(drm_test_framebuffer_lookup_inexistent), + KUNIT_CASE(drm_test_framebuffer_modifiers_not_supported), { } }; diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig index 2385c56493b9..31ad582b7602 100644 --- a/drivers/gpu/drm/tidss/Kconfig +++ b/drivers/gpu/drm/tidss/Kconfig @@ -2,6 +2,7 @@ config DRM_TIDSS tristate "DRM Support for TI Keystone" depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index d15f836dca95..2428b9aaa003 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -11,6 +11,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -109,6 +110,7 @@ static const struct drm_driver tidss_driver = { .fops = &tidss_fops, .release = tidss_release, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "tidss", .desc = "TI Keystone DSS", .date = "20180215", @@ -186,7 +188,7 @@ static int tidss_probe(struct platform_device *pdev) goto err_irq_uninstall; } - drm_fbdev_dma_setup(ddev, 32); + drm_client_setup(ddev, NULL); dev_dbg(dev, "%s done\n", __func__); diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index d3bd2d7a181e..24f9a245ba59 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -2,6 +2,7 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" depends on DRM && OF && ARM + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_BRIDGE diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index cd5eefa06060..8c9f3705aa6c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -14,6 +14,7 @@ #include <linux/pm_runtime.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -374,7 +375,8 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) goto init_failed; priv->is_registered = true; - drm_fbdev_dma_setup(ddev, bpp); + drm_client_setup_with_color_mode(ddev, bpp); + return 0; init_failed: @@ -472,6 +474,7 @@ DEFINE_DRM_GEM_DMA_FOPS(fops); static const struct drm_driver tilcdc_driver = { .driver_features = DRIVER_GEM | 
DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = tilcdc_debugfs_init, #endif diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index f6889f649bc1..94cbdb1337c0 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -3,6 +3,7 @@ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER help @@ -13,10 +14,9 @@ config DRM_ARCPGU config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM - select DRM_TTM_HELPER help This is a KMS driver for qemu's stdvga output. Choose this option for qemu. @@ -26,6 +26,7 @@ config DRM_BOCHS config DRM_CIRRUS_QEMU tristate "Cirrus driver for QEMU emulated device" depends on DRM && PCI && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help @@ -45,6 +46,7 @@ config DRM_CIRRUS_QEMU config DRM_GM12U320 tristate "GM12U320 driver for USB projectors" depends on DRM && USB && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help @@ -55,6 +57,7 @@ config DRM_OFDRM tristate "Open Firmware display driver" depends on DRM && MMU && OF && (PPC || COMPILE_TEST) select APERTURE_HELPERS + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help @@ -67,6 +70,7 @@ config DRM_OFDRM config DRM_PANEL_MIPI_DBI tristate "DRM support for MIPI DBI compatible panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -83,6 +87,7 @@ config DRM_SIMPLEDRM tristate "Simple framebuffer driver" depends on DRM && MMU select APERTURE_HELPERS + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help @@ -99,6 +104,7 @@ config DRM_SIMPLEDRM config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -113,6 +119,7 @@ config TINYDRM_ILI9163 tristate "DRM support for ILI9163 display panels" depends on DRM && SPI select BACKLIGHT_CLASS_DEVICE + select DRM_CLIENT_SELECTION select DRM_GEM_DMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DBI @@ -125,6 +132,7 @@ config TINYDRM_ILI9163 config TINYDRM_ILI9225 tristate "DRM support for ILI9225 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -137,6 +145,7 @@ config TINYDRM_ILI9225 config TINYDRM_ILI9341 tristate "DRM support for ILI9341 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -150,6 +159,7 @@ config TINYDRM_ILI9341 config TINYDRM_ILI9486 tristate "DRM support for ILI9486 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -164,6 +174,7 @@ config TINYDRM_ILI9486 config TINYDRM_MI0283QT tristate "DRM support for MI0283QT" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -175,6 +186,7 @@ config TINYDRM_MI0283QT config TINYDRM_REPAPER tristate "DRM support for Pervasive Displays RePaper panels (V231)" depends on DRM && 
SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER help @@ -186,9 +198,31 @@ config TINYDRM_REPAPER If M is selected the module will be called repaper. +config TINYDRM_SHARP_MEMORY + tristate "DRM support for Sharp Memory LCD panels" + depends on DRM && SPI + select DRM_CLIENT_SELECTION + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + help + DRM Driver for the following Sharp Memory Panels: + * 1.00" Sharp Memory LCD (LS010B7DH04) + * 1.10" Sharp Memory LCD (LS011B7DH03) + * 1.20" Sharp Memory LCD (LS012B7DD01) + * 1.28" Sharp Memory LCD (LS013B7DH03) + * 1.26" Sharp Memory LCD (LS013B7DH05) + * 1.80" Sharp Memory LCD (LS018B7DH02) + * 2.70" Sharp Memory LCD (LS027B7DH01) + * 2.70" Sharp Memory LCD (LS027B7DH01A) + * 3.20" Sharp Memory LCD (LS032B7DD02) + * 4.40" Sharp Memory LCD (LS044Q7DH01) + + If M is selected the module will be called sharp_memory. + config TINYDRM_ST7586 tristate "DRM support for Sitronix ST7586 display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI @@ -201,6 +235,7 @@ config TINYDRM_ST7586 config TINYDRM_ST7735R tristate "DRM support for Sitronix ST7715R/ST7735R display panels" depends on DRM && SPI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER select DRM_MIPI_DBI diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 76dde89a044b..4aaf56f8707d 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -14,5 +14,6 @@ obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o +obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o obj-$(CONFIG_TINYDRM_ST7586) += st7586.o obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index 4f8f3172379e..81abedec435d 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -371,6 +372,7 @@ static const struct drm_driver arcpgu_drm_driver = { .patchlevel = 0, .fops = &arcpgu_drm_ops, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, #ifdef CONFIG_DEBUG_FS .debugfs_init = arcpgu_debugfs_init, #endif @@ -394,7 +396,7 @@ static int arcpgu_probe(struct platform_device *pdev) if (ret) goto err_unload; - drm_fbdev_dma_setup(&arcpgu->drm, 16); + drm_client_setup_with_fourcc(&arcpgu->drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 31fc5d839e10..6f91ff1dbf7e 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -1,21 +1,26 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/bug.h> +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fbdev_ttm.h> +#include <drm/drm_fbdev_shmem.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_gem_shmem_helper.h> 
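+/* the shmem GEM helpers replace the VRAM helpers here: bochs now renders into a system-memory shadow buffer and copies damaged rectangles into the hardware framebuffer on each atomic flush */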
#include <drm/drm_managed.h> #include <drm/drm_module.h> +#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> #include <video/vga.h> @@ -71,6 +76,8 @@ enum bochs_types { }; struct bochs_device { + struct drm_device dev; + /* hw */ void __iomem *mmio; int ioports; @@ -85,22 +92,32 @@ struct bochs_device { u16 yres_virtual; u32 stride; u32 bpp; - const struct drm_edid *drm_edid; /* drm */ - struct drm_device *dev; - struct drm_simple_display_pipe pipe; + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; struct drm_connector connector; }; +static struct bochs_device *to_bochs_device(const struct drm_device *dev) +{ + return container_of(dev, struct bochs_device, dev); +} + /* ---------------------------------------------------------------------- */ +static __always_inline bool bochs_uses_mmio(struct bochs_device *bochs) +{ + return !IS_ENABLED(CONFIG_HAS_IOPORT) || bochs->mmio; +} + static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) { if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) return; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = ioport - 0x3c0 + 0x400; writeb(val, bochs->mmio + offset); @@ -114,7 +131,7 @@ static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport) if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) return 0xff; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = ioport - 0x3c0 + 0x400; return readb(bochs->mmio + offset); @@ -127,7 +144,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) { u16 ret = 0; - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = 0x500 + (reg << 1); ret = readw(bochs->mmio + offset); @@ -140,7 +157,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) { - if (bochs->mmio) { + if (bochs_uses_mmio(bochs)) { int offset = 0x500 + (reg << 1); writew(val, bochs->mmio + offset); @@ -172,12 +189,14 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs) #define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b) #endif -static int bochs_get_edid_block(void *data, u8 *buf, - unsigned int block, size_t len) +static int bochs_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct bochs_device *bochs = data; size_t i, start = block * EDID_LENGTH; + if (!bochs->mmio) + return -1; + if (start + len > 0x400 /* vga register offset */) return -1; @@ -187,55 +206,53 @@ static int bochs_hw_load_edid(struct bochs_device *bochs) return 0; } -static int bochs_hw_load_edid(struct bochs_device *bochs) +static const struct drm_edid *bochs_hw_read_edid(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct bochs_device *bochs = to_bochs_device(dev); u8 header[8]; - if (!bochs->mmio) - return -1; - /* check header to detect whether EDID support is enabled in qemu */ bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); if (drm_edid_header_is_valid(header) != 8) - return -1; + return NULL; - drm_edid_free(bochs->drm_edid); - bochs->drm_edid = drm_edid_read_custom(&bochs->connector, - bochs_get_edid_block, bochs); - if (!bochs->drm_edid) - return -1; + drm_dbg(dev, "Found EDID data blob.\n"); - return 0; + return drm_edid_read_custom(connector, bochs_get_edid_block, bochs); } -static int bochs_hw_init(struct drm_device *dev) +static int bochs_hw_init(struct bochs_device *bochs) { - struct bochs_device *bochs =
dev->dev_private; + struct drm_device *dev = &bochs->dev; struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned long addr, size, mem, ioaddr, iosize; u16 id; if (pdev->resource[2].flags & IORESOURCE_MEM) { + ioaddr = pci_resource_start(pdev, 2); + iosize = pci_resource_len(pdev, 2); /* mmio bar with vga and bochs registers present */ - if (pci_request_region(pdev, 2, "bochs-drm") != 0) { + if (!devm_request_mem_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request mmio region\n"); return -EBUSY; } - ioaddr = pci_resource_start(pdev, 2); - iosize = pci_resource_len(pdev, 2); - bochs->mmio = ioremap(ioaddr, iosize); + bochs->mmio = devm_ioremap(&pdev->dev, ioaddr, iosize); if (bochs->mmio == NULL) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; } - } else { + } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) { ioaddr = VBE_DISPI_IOPORT_INDEX; iosize = 2; - if (!request_region(ioaddr, iosize, "bochs-drm")) { + if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request ioports\n"); return -EBUSY; } bochs->ioports = 1; + } else { + dev_err(dev->dev, "I/O ports are not supported\n"); + return -EIO; } id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); @@ -258,10 +275,10 @@ static int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) + if (!devm_request_mem_region(&pdev->dev, addr, size, "bochs-drm")) DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); - bochs->fb_map = ioremap(addr, size); + bochs->fb_map = devm_ioremap_wc(&pdev->dev, addr, size); if (bochs->fb_map == NULL) { DRM_ERROR("Cannot map framebuffer\n"); return -ENOMEM; @@ -290,22 +307,6 @@ noext: return 0; } -static void bochs_hw_fini(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - /* TODO: shot down existing vram mappings */ - - if (bochs->mmio) - iounmap(bochs->mmio); - if (bochs->ioports) - release_region(VBE_DISPI_IOPORT_INDEX, 2); - if (bochs->fb_map) - iounmap(bochs->fb_map); - pci_release_regions(to_pci_dev(dev->dev)); - drm_edid_free(bochs->drm_edid); -} - static void bochs_hw_blank(struct bochs_device *bochs, bool blank) { DRM_DEBUG_DRIVER("hw_blank %d\n", blank); @@ -321,7 +322,7 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->xres = mode->hdisplay; @@ -357,7 +358,7 @@ static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_form { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; DRM_DEBUG_DRIVER("format %c%c%c%c\n", @@ -388,7 +389,7 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid unsigned long offset; unsigned int vx, vy, vwidth, idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->stride = stride; @@ -410,83 +411,156 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid /* ---------------------------------------------------------------------- */ -static const uint32_t bochs_formats[] = { +static const uint32_t bochs_primary_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_BGRX8888, }; -static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state) +static int bochs_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { - struct drm_gem_vram_object *gbo; - s64 gpu_addr; 
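+ /* unlike the old prepare_fb path there is no BO left to pin: the shadow-plane helpers map the shmem buffer around each commit, so only the position/scaling checks remain here */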
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; - if (!state->fb || !bochs->stride) + return 0; +} + +static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct bochs_device *bochs = to_bochs_device(dev); + struct drm_plane_state *plane_state = plane->state; + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + + if (!fb || !bochs->stride) return; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); - gpu_addr = drm_gem_vram_offset(gbo); - if (WARN_ON_ONCE(gpu_addr < 0)) - return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map); + + iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, &damage)); + drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage); + } + /* Always scanout image at VRAM offset 0 */ bochs_hw_setbase(bochs, - state->crtc_x, - state->crtc_y, - state->fb->pitches[0], - state->fb->offsets[0] + gpu_addr); - bochs_hw_setformat(bochs, state->fb->format); + plane_state->crtc_x, + plane_state->crtc_y, + fb->pitches[0], + 0); + bochs_hw_setformat(bochs, fb->format); } -static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) +static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = bochs_primary_plane_helper_atomic_check, + .atomic_update = bochs_primary_plane_helper_atomic_update, +}; + +static const struct drm_plane_funcs bochs_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS +}; + +static void bochs_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct bochs_device *bochs = to_bochs_device(crtc->dev); + struct drm_crtc_state *crtc_state = crtc->state; bochs_hw_setmode(bochs, &crtc_state->mode); - bochs_plane_update(bochs, plane_state); } -static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe) +static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - bochs_hw_blank(bochs, true); + if (!crtc_state->enable) + return 0; + + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); +} + +static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ } 
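+/* presumably nothing to program on enable: the mode is set in mode_set_nofb and the first plane update paints the screen, while atomic_disable does the blanking */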
-static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) +static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *crtc_state) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct bochs_device *bochs = to_bochs_device(crtc->dev); - bochs_plane_update(bochs, pipe->plane.state); + bochs_hw_blank(bochs, true); } -static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { - .enable = bochs_pipe_enable, - .disable = bochs_pipe_disable, - .update = bochs_pipe_update, - .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, - .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, +static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = { + .mode_set_nofb = bochs_crtc_helper_mode_set_nofb, + .atomic_check = bochs_crtc_helper_atomic_check, + .atomic_enable = bochs_crtc_helper_atomic_enable, + .atomic_disable = bochs_crtc_helper_atomic_disable, }; -static int bochs_connector_get_modes(struct drm_connector *connector) +static const struct drm_crtc_funcs bochs_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs bochs_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int bochs_connector_helper_get_modes(struct drm_connector *connector) { + const struct drm_edid *edid; int count; - count = drm_edid_connector_add_modes(connector); + edid = bochs_hw_read_edid(connector); - if (!count) { + if (edid) { + drm_edid_connector_update(connector, edid); + count = drm_edid_connector_add_modes(connector); + drm_edid_free(edid); + } else { + drm_edid_connector_update(connector, NULL); count = drm_add_modes_noedid(connector, 8192, 8192); drm_set_preferred_mode(connector, defx, defy); } + return count; } -static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { - .get_modes = bochs_connector_get_modes, +static const struct drm_connector_helper_funcs bochs_connector_helper_funcs = { + .get_modes = bochs_connector_helper_get_modes, }; -static const struct drm_connector_funcs bochs_connector_connector_funcs = { +static const struct drm_connector_funcs bochs_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, @@ -494,68 +568,89 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static void bochs_connector_init(struct drm_device *dev) +static enum drm_mode_status bochs_mode_config_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode) { - struct bochs_device *bochs = dev->dev_private; - struct drm_connector *connector = &bochs->connector; - - drm_connector_init(dev, connector, &bochs_connector_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs); - - bochs_hw_load_edid(bochs); - if (bochs->drm_edid) { - DRM_INFO("Found EDID data blob.\n"); - drm_connector_attach_edid_property(connector); - drm_edid_connector_update(&bochs->connector, bochs->drm_edid); - } -} + struct bochs_device *bochs = to_bochs_device(dev); + const struct drm_format_info *format = 
drm_format_info(DRM_FORMAT_XRGB8888); + u64 pitch; -static struct drm_framebuffer * -bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && - mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) - return ERR_PTR(-EINVAL); + if (drm_WARN_ON(dev, !format)) + return MODE_ERROR; + + pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay); + if (!pitch) + return MODE_BAD_WIDTH; + if (mode->vdisplay > DIV_ROUND_DOWN_ULL(bochs->fb_size, pitch)) + return MODE_MEM; - return drm_gem_fb_create(dev, file, mode_cmd); + return MODE_OK; } -static const struct drm_mode_config_funcs bochs_mode_funcs = { - .fb_create = bochs_gem_fb_create, - .mode_valid = drm_vram_helper_mode_valid, +static const struct drm_mode_config_funcs bochs_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .mode_valid = bochs_mode_config_mode_valid, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int bochs_kms_init(struct bochs_device *bochs) { + struct drm_device *dev = &bochs->dev; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_encoder *encoder; int ret; - ret = drmm_mode_config_init(bochs->dev); + ret = drmm_mode_config_init(dev); if (ret) return ret; - bochs->dev->mode_config.max_width = 8192; - bochs->dev->mode_config.max_height = 8192; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; - bochs->dev->mode_config.preferred_depth = 24; - bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + dev->mode_config.preferred_depth = 24; + dev->mode_config.quirk_addfb_prefer_host_byte_order = true; - bochs->dev->mode_config.funcs = &bochs_mode_funcs; + dev->mode_config.funcs = &bochs_mode_config_funcs; - bochs_connector_init(bochs->dev); - drm_simple_display_pipe_init(bochs->dev, - &bochs->pipe, - &bochs_pipe_funcs, - bochs_formats, - ARRAY_SIZE(bochs_formats), - NULL, - &bochs->connector); + primary_plane = &bochs->primary_plane; + ret = drm_universal_plane_init(dev, primary_plane, 0, + &bochs_primary_plane_funcs, + bochs_primary_plane_formats, + ARRAY_SIZE(bochs_primary_plane_formats), + NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + drm_plane_helper_add(primary_plane, &bochs_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); - drm_mode_config_reset(bochs->dev); + crtc = &bochs->crtc; + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &bochs_crtc_funcs, NULL); + if (ret) + return ret; + drm_crtc_helper_add(crtc, &bochs_crtc_helper_funcs); + + encoder = &bochs->encoder; + ret = drm_encoder_init(dev, encoder, &bochs_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + connector = &bochs->connector; + ret = drm_connector_init(dev, connector, &bochs_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) + return ret; + drm_connector_helper_add(connector, &bochs_connector_helper_funcs); + drm_connector_attach_edid_property(connector); + drm_connector_attach_encoder(connector, encoder); + + drm_mode_config_reset(dev); return 0; } @@ -563,34 +658,19 @@ static int bochs_kms_init(struct bochs_device *bochs) /* ---------------------------------------------------------------------- */ /* drm interface */ -static int bochs_load(struct drm_device *dev) +static int bochs_load(struct 
bochs_device *bochs) { - struct bochs_device *bochs; int ret; - bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); - if (bochs == NULL) - return -ENOMEM; - dev->dev_private = bochs; - bochs->dev = dev; - - ret = bochs_hw_init(dev); + ret = bochs_hw_init(bochs); if (ret) return ret; - ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size); - if (ret) - goto err_hw_fini; - ret = bochs_kms_init(bochs); if (ret) - goto err_hw_fini; + return ret; return 0; - -err_hw_fini: - bochs_hw_fini(dev); - return ret; } DEFINE_DRM_GEM_FOPS(bochs_fops); @@ -603,7 +683,8 @@ static const struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - DRM_GEM_VRAM_DRIVER, + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; /* ---------------------------------------------------------------------- */ @@ -635,23 +716,18 @@ static const struct dev_pm_ops bochs_pm_ops = { static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct bochs_device *bochs; struct drm_device *dev; - unsigned long fbsize; int ret; - fbsize = pci_resource_len(pdev, 0); - if (fbsize < 4 * 1024 * 1024) { - DRM_ERROR("less than 4 MB video memory, ignoring device\n"); - return -ENOMEM; - } - - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, bochs_driver.name); if (ret) return ret; - dev = drm_dev_alloc(&bochs_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev); + if (IS_ERR(bochs)) + return PTR_ERR(bochs); + dev = &bochs->dev; ret = pcim_enable_device(pdev); if (ret) @@ -659,19 +735,18 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent pci_set_drvdata(pdev, dev); - ret = bochs_load(dev); + ret = bochs_load(bochs); if (ret) goto err_free_dev; ret = drm_dev_register(dev, 0); if (ret) - goto err_hw_fini; + goto err_free_dev; + + drm_client_setup(dev, NULL); - drm_fbdev_ttm_setup(dev, 32); return ret; -err_hw_fini: - bochs_hw_fini(dev); err_free_dev: drm_dev_put(dev); return ret; @@ -683,7 +758,6 @@ static void bochs_pci_remove(struct pci_dev *pdev) drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); - bochs_hw_fini(dev); drm_dev_put(dev); } diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 751326e3d9c3..4d2adcaeaa60 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,6 +16,7 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ +#include <linux/aperture.h> #include <linux/iosys-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -23,10 +24,10 @@ #include <video/cirrus.h> #include <video/vga.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -509,8 +510,10 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc, cirrus_mode_set(cirrus, &crtc_state->mode); +#ifdef CONFIG_HAS_IOPORT /* Unblank (needed on S3 resume, vgabios doesn't do it then) */ outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W); +#endif drm_dev_exit(idx); } @@ -662,6 +665,7 @@ static const struct drm_driver cirrus_driver = { .fops = &cirrus_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static int cirrus_pci_probe(struct pci_dev *pdev, @@ -671,7 +675,7 @@ static int cirrus_pci_probe(struct 
pci_dev *pdev, struct cirrus_device *cirrus; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, cirrus_driver.name); if (ret) return ret; @@ -716,7 +720,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, if (ret) return ret; - drm_fbdev_shmem_setup(dev, 16); + drm_client_setup(dev, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 0bd7707c053e..0c17ae532fb4 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -632,6 +633,7 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gm12u320_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { @@ -706,7 +708,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface, if (ret) goto err_put_device; - drm_fbdev_shmem_setup(dev, 0); + drm_client_setup(dev, NULL); return 0; diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 2e631282edeb..6b0d1846cfcf 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -17,6 +17,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -194,6 +195,7 @@ static const struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", .desc = "HX8357D", @@ -256,7 +258,7 @@ static int hx8357d_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index 86f9d8834901..5eb39ca1a855 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -8,6 +8,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -113,6 +114,7 @@ static struct drm_driver ili9163_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9163_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9163", .desc = "Ilitek ILI9163", @@ -185,7 +187,7 @@ static int ili9163_probe(struct spi_device *spi) if (ret) return ret; - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index b6b7a49147bf..875e2d09729a 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -17,6 +17,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> @@ -360,6 +361,7 @@ static const struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = 
&ili9225_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "ili9225", .desc = "Ilitek ILI9225", .date = "20171106", @@ -426,7 +428,7 @@ static int ili9225_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 8bcada30af71..c1dfdfbbd30c 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -16,6 +16,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -150,6 +151,7 @@ static const struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", .desc = "Ilitek ILI9341", @@ -218,7 +220,7 @@ static int ili9341_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 70d366260041..7e46a720d5e2 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -15,6 +15,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -172,6 +173,7 @@ static const struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9486", .desc = "Ilitek ILI9486", @@ -247,7 +249,7 @@ static int ili9486_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index cdc5423990ca..f1461c55dba6 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -14,6 +14,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -154,6 +155,7 @@ static const struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", .desc = "Multi-Inno MI0283QT", @@ -226,7 +228,7 @@ static int mi0283qt_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 35996f7eedac..220c1244b3c0 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -1,12 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/of_address.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> @@ -1219,7 +1220,7 @@ static 
struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, fb_pgbase = round_down(fb_base, PAGE_SIZE); fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE); - ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize); + ret = devm_aperture_acquire_for_platform_device(pdev, fb_pgbase, fb_pgsize); if (ret) { drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret); return ERR_PTR(ret); @@ -1344,6 +1345,7 @@ DEFINE_DRM_GEM_FOPS(ofdrm_fops); static struct drm_driver ofdrm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -1361,7 +1363,6 @@ static int ofdrm_probe(struct platform_device *pdev) { struct ofdrm_device *odev; struct drm_device *dev; - unsigned int color_mode; int ret; odev = ofdrm_device_create(&ofdrm_driver, pdev); @@ -1373,11 +1374,7 @@ static int ofdrm_probe(struct platform_device *pdev) if (ret) return ret; - color_mode = drm_format_info_bpp(odev->format, 0); - if (color_mode == 16) - color_mode = odev->format->depth; // can be 15 or 16 - - drm_fbdev_shmem_setup(dev, color_mode); + drm_client_setup(dev, odev->format); return 0; } diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index f753cdffe6f8..e66729b31bd6 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -15,6 +15,7 @@ #include <linux/spi/spi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -264,6 +265,7 @@ static const struct drm_driver panel_mipi_dbi_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &panel_mipi_dbi_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "panel-mipi-dbi", .desc = "MIPI DBI compatible display panel", @@ -388,7 +390,7 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 1f78aa3d26bb..77944eb17b3c 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -22,6 +22,7 @@ #include <linux/thermal.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> @@ -913,6 +914,7 @@ static const struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", .date = "20170405", @@ -1118,7 +1120,7 @@ static int repaper_probe(struct spi_device *spi) DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c new file mode 100644 index 000000000000..2d2315bd6aef --- /dev/null +++ b/drivers/gpu/drm/tiny/sharp-memory.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> +#include <drm/drm_connector.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_dma_helper.h> 
+#include <drm/drm_fbdev_dma.h> +#include <drm/drm_format_helper.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modes.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_rect.h> +#include <linux/bitrev.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/kthread.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pwm.h> +#include <linux/spi/spi.h> + +#define SHARP_MODE_PERIOD 8 +#define SHARP_ADDR_PERIOD 8 +#define SHARP_DUMMY_PERIOD 8 + +#define SHARP_MEMORY_DISPLAY_MAINTAIN_MODE 0 +#define SHARP_MEMORY_DISPLAY_UPDATE_MODE 1 +#define SHARP_MEMORY_DISPLAY_CLEAR_MODE 4 + +enum sharp_memory_model { + LS010B7DH04, + LS011B7DH03, + LS012B7DD01, + LS013B7DH03, + LS013B7DH05, + LS018B7DH02, + LS027B7DH01, + LS027B7DH01A, + LS032B7DD02, + LS044Q7DH01, +}; + +enum sharp_memory_vcom_mode { + SHARP_MEMORY_SOFTWARE_VCOM, + SHARP_MEMORY_EXTERNAL_VCOM, + SHARP_MEMORY_PWM_VCOM +}; + +struct sharp_memory_device { + struct drm_device drm; + struct spi_device *spi; + + const struct drm_display_mode *mode; + + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector connector; + + struct gpio_desc *enable_gpio; + + struct task_struct *sw_vcom_signal; + struct pwm_device *pwm_vcom_signal; + + enum sharp_memory_vcom_mode vcom_mode; + u8 vcom; + + u32 pitch; + u32 tx_buffer_size; + u8 *tx_buffer; + + /* When vcom_mode == "software" a kthread is used to periodically send a + * 'maintain display' message over SPI. This mutex ensures that tx_buffer + * access and SPI bus usage are synchronized in this case.
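+ * In the external and pwm modes the vcom signal is instead generated in + * hardware on the EXTCOMM pin, so only the atomic commit path touches + * tx_buffer.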
+ */ + struct mutex tx_mutex; +}; + +static inline int sharp_memory_spi_write(struct spi_device *spi, void *buf, size_t len) +{ + /* Reverse the bit order */ + for (u8 *b = buf; b < ((u8 *)buf) + len; ++b) + *b = bitrev8(*b); + + return spi_write(spi, buf, len); +} + +static inline struct sharp_memory_device *drm_to_sharp_memory_device(struct drm_device *drm) +{ + return container_of(drm, struct sharp_memory_device, drm); +} + +DEFINE_DRM_GEM_DMA_FOPS(sharp_memory_fops); + +static const struct drm_driver sharp_memory_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &sharp_memory_fops, + DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, + .name = "sharp_memory_display", + .desc = "Sharp Display Memory LCD", + .date = "20231129", + .major = 1, + .minor = 0, +}; + +static inline void sharp_memory_set_tx_buffer_mode(u8 *buffer, u8 mode, u8 vcom) +{ + *buffer = mode | (vcom << 1); +} + +static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer, + struct drm_rect clip, + u32 pitch) +{ + for (u32 line = 0; line < clip.y2; ++line) + buffer[line * pitch] = line + 1; +} + +static void sharp_memory_set_tx_buffer_data(u8 *buffer, + struct drm_framebuffer *fb, + struct drm_rect clip, + u32 pitch, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + struct iosys_map dst, vmap; + struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return; + + iosys_map_set_vaddr(&dst, buffer); + iosys_map_set_vaddr(&vmap, dma_obj->vaddr); + + drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state); + + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); +} + +static int sharp_memory_update_display(struct sharp_memory_device *smd, + struct drm_framebuffer *fb, + struct drm_rect clip, + struct drm_format_conv_state *fmtcnv_state) +{ + int ret; + u32 pitch = smd->pitch; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + u32 tx_buffer_size = smd->tx_buffer_size; + + mutex_lock(&smd->tx_mutex); + + /* Populate the transmit buffer with frame data */ + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], + SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom); + sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch); + sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state); + + ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_maintain_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_MAINTAIN_MODE, vcom); + tx_buffer[1] = 0; /* Write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static int sharp_memory_clear_display(struct sharp_memory_device *smd) +{ + int ret; + u8 vcom = smd->vcom; + u8 *tx_buffer = smd->tx_buffer; + + mutex_lock(&smd->tx_mutex); + + sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_CLEAR_MODE, vcom); + tx_buffer[1] = 0; /* write dummy data */ + ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2); + + mutex_unlock(&smd->tx_mutex); + + return ret; +} + +static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect, + struct drm_format_conv_state *fmtconv_state) +{ + struct drm_rect clip; + struct sharp_memory_device *smd = 
drm_to_sharp_memory_device(fb->dev); + + /* Always update a full line regardless of what is dirty */ + clip.x1 = 0; + clip.x2 = fb->width; + clip.y1 = rect->y1; + clip.y2 = rect->y2; + + sharp_memory_update_display(smd, fb, clip, fmtconv_state); +} + +static int sharp_memory_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct sharp_memory_device *smd; + struct drm_crtc_state *crtc_state; + + smd = container_of(plane, struct sharp_memory_device, plane); + crtc_state = drm_atomic_get_new_crtc_state(state, &smd->crtc); + + return drm_atomic_helper_check_plane_state(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + +static void sharp_memory_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *plane_state = plane->state; + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + struct sharp_memory_device *smd; + struct drm_rect rect; + + smd = container_of(plane, struct sharp_memory_device, plane); + if (!smd->crtc.state->active) + return; + + if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect)) + sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state); + + drm_format_conv_state_release(&fmtcnv_state); +} + +static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = { + .prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = sharp_memory_plane_atomic_check, + .atomic_update = sharp_memory_plane_atomic_update, +}; + +static bool sharp_memory_format_mod_supported(struct drm_plane *plane, + u32 format, + u64 modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + +static const struct drm_plane_funcs sharp_memory_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = sharp_memory_format_mod_supported, +}; + +static enum drm_mode_status sharp_memory_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, smd->mode); +} + +static int sharp_memory_crtc_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + int ret; + + if (!crtc_state->enable) + goto out; + + ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state); + if (ret) + return ret; + +out: + return drm_atomic_add_affected_planes(state, crtc); +} + +static int sharp_memory_sw_vcom_signal_thread(void *data) +{ + struct sharp_memory_device *smd = data; + + while (!kthread_should_stop()) { + smd->vcom ^= 1; /* Toggle vcom */ + sharp_memory_maintain_display(smd); + msleep(1000); + } + + return 0; +} + +static void sharp_memory_crtc_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 1); +} + +static void sharp_memory_crtc_disable(struct drm_crtc 
*crtc, + struct drm_atomic_state *state) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev); + + sharp_memory_clear_display(smd); + + if (smd->enable_gpio) + gpiod_set_value(smd->enable_gpio, 0); +} + +static const struct drm_crtc_helper_funcs sharp_memory_crtc_helper_funcs = { + .mode_valid = sharp_memory_crtc_mode_valid, + .atomic_check = sharp_memory_crtc_check, + .atomic_enable = sharp_memory_crtc_enable, + .atomic_disable = sharp_memory_crtc_disable, +}; + +static const struct drm_crtc_funcs sharp_memory_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs sharp_memory_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int sharp_memory_connector_get_modes(struct drm_connector *connector) +{ + struct sharp_memory_device *smd = drm_to_sharp_memory_device(connector->dev); + + return drm_connector_helper_get_modes_fixed(connector, smd->mode); +} + +static const struct drm_connector_helper_funcs sharp_memory_connector_hfuncs = { + .get_modes = sharp_memory_connector_get_modes, +}; + +static const struct drm_connector_funcs sharp_memory_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + +}; + +static const struct drm_mode_config_funcs sharp_memory_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const struct drm_display_mode sharp_memory_ls010b7dh04_mode = { + DRM_SIMPLE_MODE(128, 128, 18, 18), +}; + +static const struct drm_display_mode sharp_memory_ls011b7dh03_mode = { + DRM_SIMPLE_MODE(160, 68, 25, 10), +}; + +static const struct drm_display_mode sharp_memory_ls012b7dd01_mode = { + DRM_SIMPLE_MODE(184, 38, 29, 6), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh03_mode = { + DRM_SIMPLE_MODE(128, 128, 23, 23), +}; + +static const struct drm_display_mode sharp_memory_ls013b7dh05_mode = { + DRM_SIMPLE_MODE(144, 168, 20, 24), +}; + +static const struct drm_display_mode sharp_memory_ls018b7dh02_mode = { + DRM_SIMPLE_MODE(230, 303, 27, 36), +}; + +static const struct drm_display_mode sharp_memory_ls027b7dh01_mode = { + DRM_SIMPLE_MODE(400, 240, 58, 35), +}; + +static const struct drm_display_mode sharp_memory_ls032b7dd02_mode = { + DRM_SIMPLE_MODE(336, 536, 42, 68), +}; + +static const struct drm_display_mode sharp_memory_ls044q7dh01_mode = { + DRM_SIMPLE_MODE(320, 240, 89, 67), +}; + +static const struct spi_device_id sharp_memory_ids[] = { + {"ls010b7dh04", (kernel_ulong_t)&sharp_memory_ls010b7dh04_mode}, + {"ls011b7dh03", (kernel_ulong_t)&sharp_memory_ls011b7dh03_mode}, + {"ls012b7dd01", (kernel_ulong_t)&sharp_memory_ls012b7dd01_mode}, + {"ls013b7dh03", (kernel_ulong_t)&sharp_memory_ls013b7dh03_mode}, + {"ls013b7dh05", (kernel_ulong_t)&sharp_memory_ls013b7dh05_mode}, + {"ls018b7dh02", (kernel_ulong_t)&sharp_memory_ls018b7dh02_mode}, + {"ls027b7dh01", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + {"ls027b7dh01a", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode}, + 
{"ls032b7dd02", (kernel_ulong_t)&sharp_memory_ls032b7dd02_mode}, + {"ls044q7dh01", (kernel_ulong_t)&sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(spi, sharp_memory_ids); + +static const struct of_device_id sharp_memory_of_match[] = { + {.compatible = "sharp,ls010b7dh04", &sharp_memory_ls010b7dh04_mode}, + {.compatible = "sharp,ls011b7dh03", &sharp_memory_ls011b7dh03_mode}, + {.compatible = "sharp,ls012b7dd01", &sharp_memory_ls012b7dd01_mode}, + {.compatible = "sharp,ls013b7dh03", &sharp_memory_ls013b7dh03_mode}, + {.compatible = "sharp,ls013b7dh05", &sharp_memory_ls013b7dh05_mode}, + {.compatible = "sharp,ls018b7dh02", &sharp_memory_ls018b7dh02_mode}, + {.compatible = "sharp,ls027b7dh01", &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls027b7dh01a", &sharp_memory_ls027b7dh01_mode}, + {.compatible = "sharp,ls032b7dd02", &sharp_memory_ls032b7dd02_mode}, + {.compatible = "sharp,ls044q7dh01", &sharp_memory_ls044q7dh01_mode}, + {}, +}; +MODULE_DEVICE_TABLE(of, sharp_memory_of_match); + +static const u32 sharp_memory_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static int sharp_memory_pipe_init(struct drm_device *dev, + struct sharp_memory_device *smd, + const u32 *formats, unsigned int format_count, + const u64 *format_modifiers) +{ + int ret; + struct drm_encoder *encoder = &smd->encoder; + struct drm_plane *plane = &smd->plane; + struct drm_crtc *crtc = &smd->crtc; + struct drm_connector *connector = &smd->connector; + + drm_plane_helper_add(plane, &sharp_memory_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &sharp_memory_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &sharp_memory_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &sharp_memory_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = drm_crtc_mask(crtc); + ret = drm_encoder_init(dev, encoder, &sharp_memory_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + ret = drm_connector_init(&smd->drm, &smd->connector, + &sharp_memory_connector_funcs, + DRM_MODE_CONNECTOR_SPI); + if (ret) + return ret; + + drm_connector_helper_add(&smd->connector, + &sharp_memory_connector_hfuncs); + + return drm_connector_attach_encoder(connector, encoder); +} + +static int sharp_memory_init_pwm_vcom_signal(struct sharp_memory_device *smd) +{ + int ret; + struct device *dev = &smd->spi->dev; + struct pwm_state pwm_state; + + smd->pwm_vcom_signal = devm_pwm_get(dev, NULL); + if (IS_ERR(smd->pwm_vcom_signal)) + return dev_err_probe(dev, PTR_ERR(smd->pwm_vcom_signal), + "Could not get pwm device\n"); + + pwm_init_state(smd->pwm_vcom_signal, &pwm_state); + pwm_set_relative_duty_cycle(&pwm_state, 1, 10); + pwm_state.enabled = true; + ret = pwm_apply_might_sleep(smd->pwm_vcom_signal, &pwm_state); + if (ret) + return dev_err_probe(dev, -EINVAL, "Could not apply pwm state\n"); + + return 0; +} + +static int sharp_memory_probe(struct spi_device *spi) +{ + int ret; + struct device *dev; + struct sharp_memory_device *smd; + struct drm_device *drm; + const char *vcom_mode_str; + + dev = &spi->dev; + + ret = spi_setup(spi); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to setup spi device\n"); + + if (!dev->coherent_dma_mask) { + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return dev_err_probe(dev, ret, "Failed to set dma mask\n"); + } + + smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver, + struct 
sharp_memory_device, drm); + if (IS_ERR(smd)) + return PTR_ERR(smd); + + spi_set_drvdata(spi, smd); + + smd->spi = spi; + drm = &smd->drm; + ret = drmm_mode_config_init(drm); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize drm config\n"); + + smd->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + if (!smd->enable_gpio) + dev_warn(dev, "Enable gpio not defined\n"); + + drm->mode_config.funcs = &sharp_memory_mode_config_funcs; + smd->mode = spi_get_device_match_data(spi); + + smd->pitch = (SHARP_ADDR_PERIOD + smd->mode->hdisplay + SHARP_DUMMY_PERIOD) / 8; + smd->tx_buffer_size = (SHARP_MODE_PERIOD + + (SHARP_ADDR_PERIOD + (smd->mode->hdisplay) + SHARP_DUMMY_PERIOD) * + smd->mode->vdisplay) / 8; + + smd->tx_buffer = devm_kzalloc(dev, smd->tx_buffer_size, GFP_KERNEL); + if (!smd->tx_buffer) + return -ENOMEM; + + mutex_init(&smd->tx_mutex); + + /* + * VCOM is a signal that prevents DC bias from being built up in + * the panel resulting in pixels being forever stuck in one state. + * + * This driver supports three different methods to generate this + * signal depending on the EXTMODE pin: + * + * software (EXTMODE = L) - This mode uses a kthread to + * periodically send a "maintain display" message to the display, + * toggling the vcom bit on and off with each message + * + * external (EXTMODE = H) - This mode relies on an external + * clock to generate the signal on the EXTCOMM pin + * + * pwm (EXTMODE = H) - This mode uses a pwm device to generate + * the signal on the EXTCOMM pin + * + */ + if (device_property_read_string(dev, "sharp,vcom-mode", &vcom_mode_str)) + return dev_err_probe(dev, -EINVAL, + "Unable to find sharp,vcom-mode property in device tree\n"); + + if (!strcmp("software", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_SOFTWARE_VCOM; + smd->sw_vcom_signal = kthread_run(sharp_memory_sw_vcom_signal_thread, + smd, "sw_vcom_signal"); + + } else if (!strcmp("external", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_EXTERNAL_VCOM; + + } else if (!strcmp("pwm", vcom_mode_str)) { + smd->vcom_mode = SHARP_MEMORY_PWM_VCOM; + ret = sharp_memory_init_pwm_vcom_signal(smd); + if (ret) + return ret; + } else { + return dev_err_probe(dev, -EINVAL, "Invalid value set for vcom-mode\n"); + } + + drm->mode_config.min_width = smd->mode->hdisplay; + drm->mode_config.max_width = smd->mode->hdisplay; + drm->mode_config.min_height = smd->mode->vdisplay; + drm->mode_config.max_height = smd->mode->vdisplay; + + ret = sharp_memory_pipe_init(drm, smd, sharp_memory_formats, + ARRAY_SIZE(sharp_memory_formats), + NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize display pipeline.\n"); + + drm_plane_enable_fb_damage_clips(&smd->plane); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return dev_err_probe(dev, ret, "Failed to register drm device.\n"); + + drm_client_setup(drm, NULL); + + return 0; +} + +static void sharp_memory_remove(struct spi_device *spi) +{ + struct sharp_memory_device *smd = spi_get_drvdata(spi); + + drm_dev_unplug(&smd->drm); + drm_atomic_helper_shutdown(&smd->drm); + + switch (smd->vcom_mode) { + case SHARP_MEMORY_SOFTWARE_VCOM: + kthread_stop(smd->sw_vcom_signal); + break; + + case SHARP_MEMORY_EXTERNAL_VCOM: + break; + + case SHARP_MEMORY_PWM_VCOM: + pwm_disable(smd->pwm_vcom_signal); + break; + } +} + +static struct spi_driver sharp_memory_spi_driver = { + .driver = { + .name = "sharp_memory", + .of_match_table = sharp_memory_of_match, + }, + .probe = sharp_memory_probe, + .remove = sharp_memory_remove, +
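/*
 * Worked example: the pitch and tx_buffer_size arithmetic in
 * sharp_memory_probe() above is easier to check with concrete numbers.
 * For the 400x240 ls027b7dh01 panel, assuming the SHARP_*_PERIOD framing
 * constants (defined earlier in the driver, outside this hunk) are 8 bits
 * each:
 *
 *   pitch          = (8 + 400 + 8) / 8             =    52 bytes per line
 *   tx_buffer_size = (8 + (8 + 400 + 8) * 240) / 8 = 12481 bytes per frame
 *
 * i.e. each transferred line carries an address byte, 400 pixel bits and a
 * trailing dummy byte, and a full frame prepends a single mode byte.
 */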
.id_table = sharp_memory_ids, +}; +module_spi_driver(sharp_memory_spi_driver); + +MODULE_AUTHOR("Alex Lanzano <lanzano.alex@gmail.com>"); +MODULE_DESCRIPTION("SPI Protocol driver for the sharp_memory display"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index d19e10289428..3182d32f1b8f 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/minmax.h> @@ -9,9 +10,9 @@ #include <linux/pm_domain.h> #include <linux/regulator/consumer.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> @@ -882,7 +883,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (mem) { void *screen_base; - ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem)); + ret = devm_aperture_acquire_for_platform_device(pdev, mem->start, + resource_size(mem)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret); return ERR_PTR(ret); @@ -902,7 +904,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (!res) return ERR_PTR(-EINVAL); - ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res)); + ret = devm_aperture_acquire_for_platform_device(pdev, res->start, + resource_size(res)); if (ret) { drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); return ERR_PTR(ret); @@ -1009,6 +1012,7 @@ DEFINE_DRM_GEM_FOPS(simpledrm_fops); static struct drm_driver simpledrm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -1026,7 +1030,6 @@ static int simpledrm_probe(struct platform_device *pdev) { struct simpledrm_device *sdev; struct drm_device *dev; - unsigned int color_mode; int ret; sdev = simpledrm_device_create(&simpledrm_driver, pdev); @@ -1038,11 +1041,7 @@ static int simpledrm_probe(struct platform_device *pdev) if (ret) return ret; - color_mode = drm_format_info_bpp(sdev->format, 0); - if (color_mode == 16) - color_mode = sdev->format->depth; // can be 15 or 16 - - drm_fbdev_shmem_setup(dev, color_mode); + drm_client_setup(dev, sdev->format); return 0; } diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index b9c6ed352182..97013685c62f 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -13,6 +13,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> @@ -290,6 +291,7 @@ static const struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", .desc = "Sitronix ST7586", @@ -371,7 +373,7 @@ static int st7586_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index 1676da00883d..0747ebd999cc 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ 
b/drivers/gpu/drm/tiny/st7735r.c @@ -17,6 +17,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_atomic_helper.h> @@ -155,6 +156,7 @@ static const struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, + DRM_FBDEV_DMA_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", .desc = "Sitronix ST7735R", @@ -241,7 +243,7 @@ static int st7735r_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_fbdev_dma_setup(drm, 0); + drm_client_setup(drm, NULL); return 0; } diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index f0a7eb62116c..3139fd9128d8 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test) err = ttm_resource_alloc(bo, place, &res2); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_ASSERT_EQ(test, - list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1); + list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1); ttm_bo_unreserve(bo); KUNIT_ASSERT_EQ(test, - list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1); + list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1); ttm_resource_free(bo, &res1); ttm_resource_free(bo, &res2); diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c index 22260e7aea58..a9f4b81921c3 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c @@ -164,18 +164,18 @@ static void ttm_resource_init_pinned(struct kunit *test) res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, res); - KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable)); dma_resv_lock(bo->base.resv, NULL); ttm_bo_pin(bo); ttm_resource_init(bo, place, res); - KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable)); ttm_bo_unpin(bo); ttm_resource_fini(man, res); dma_resv_unlock(bo->base.resv); - KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned)); + KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable)); } static void ttm_resource_fini_basic(struct kunit *test) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 320592435252..48c5365efca1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -139,7 +139,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) goto out_err; } @@ -594,7 +594,8 @@ void ttm_bo_pin(struct ttm_buffer_object *bo) spin_lock(&bo->bdev->lru_lock); if (bo->resource) ttm_resource_del_bulk_move(bo->resource, bo); - ++bo->pin_count; + if (!bo->pin_count++ && bo->resource) + ttm_resource_move_to_lru_tail(bo->resource); spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_pin); @@ -613,9 +614,10 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) return; spin_lock(&bo->bdev->lru_lock); - --bo->pin_count; - if (bo->resource) + if (!--bo->pin_count && bo->resource) { ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + } 
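/*
 * For context on the ttm_bo_pin()/ttm_bo_unpin() hunks above: both run
 * with bo->base.resv held, and after this change the first pin parks the
 * resource on the device's unevictable list while the last unpin returns
 * it to the bulk move and LRU tail. A minimal caller sketch follows
 * (hypothetical driver code, not part of this series):
 */
static int example_pin_for_scanout(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);	/* locks bo->base.resv */
	if (ret)
		return ret;
	ttm_bo_pin(bo);		/* first pin: resource leaves the eviction LRUs */
	ttm_bo_unreserve(bo);

	/* ... scanout runs; later, to release, under the reservation again: */

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;
	ttm_bo_unpin(bo);	/* last unpin: back onto the bulk move / LRU tail */
	ttm_bo_unreserve(bo);

	return 0;
}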
spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_unpin); @@ -1128,9 +1130,20 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo) if (bo->bdev->funcs->swap_notify) bo->bdev->funcs->swap_notify(bo); - if (ttm_tt_is_populated(bo->ttm)) + if (ttm_tt_is_populated(bo->ttm)) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); + ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags); + spin_lock(&bo->bdev->lru_lock); + if (ret) + ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + spin_unlock(&bo->bdev->lru_lock); + } + out: /* Consider -ENOMEM and -ENOSPC non-fatal. */ if (ret == -ENOMEM || ret == -ENOSPC) @@ -1180,3 +1193,47 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->bdev, bo->ttm); bo->ttm = NULL; } + +/** + * ttm_bo_populate() - Ensure that a buffer object has backing pages + * @bo: The buffer object + * @ctx: The ttm_operation_ctx governing the operation. + * + * For buffer objects in a memory type whose manager uses + * struct ttm_tt for backing pages, ensure those backing pages + * are present and with valid content. The bo's resource is also + * placed on the correct LRU list if it was previously swapped + * out. + * + * Return: 0 if successful, negative error code on failure. + * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible + * is set to true. + */ +int ttm_bo_populate(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) +{ + struct ttm_tt *tt = bo->ttm; + bool swapped; + int ret; + + dma_resv_assert_held(bo->base.resv); + + if (!tt) + return 0; + + swapped = ttm_tt_is_swapped(tt); + ret = ttm_tt_populate(bo->bdev, tt, ctx); + if (ret) + return ret; + + if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count && + bo->resource) { + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(bo->resource, bo); + ttm_resource_move_to_lru_tail(bo->resource); + spin_unlock(&bo->bdev->lru_lock); + } + + return 0; +} +EXPORT_SYMBOL(ttm_bo_populate); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3c07f4712d5c..d939925efa81 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -163,7 +163,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, src_man = ttm_manager_type(bdev, src_mem->mem_type); if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || dst_man->use_tt)) { - ret = ttm_tt_populate(bdev, ttm, ctx); + ret = ttm_bo_populate(bo, ctx); if (ret) return ret; } @@ -350,7 +350,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, BUG_ON(!ttm); - ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; @@ -507,7 +507,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) pgprot_t prot; void *vaddr; - ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + ret = ttm_bo_populate(bo, &ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4212b8c91dd4..2c699ed1963a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -224,7 +224,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, }; ttm = bo->ttm; - err = ttm_tt_populate(bdev, bo->ttm, &ctx); + err = ttm_bo_populate(bo, &ctx); if (err) { if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN) diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 
e7cc4954c1bc..02e797fd1891 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -216,7 +216,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); - INIT_LIST_HEAD(&bdev->pinned); + INIT_LIST_HEAD(&bdev->unevictable); bdev->dev_mapping = mapping; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); @@ -283,7 +283,7 @@ void ttm_device_clear_dma_mappings(struct ttm_device *bdev) struct ttm_resource_manager *man; unsigned int i, j; - ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned); + ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable); for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) { man = ttm_manager_type(bdev, i); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 6d764ba88aab..a87665eb28a6 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -30,6 +30,7 @@ #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_tt.h> #include <drm/drm_util.h> @@ -235,11 +236,26 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, } } +static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo) +{ + /* + * Take care when creating a new resource for a bo, that it is not considered + * swapped if it's not the current resource for the bo and is thus logically + * associated with the ttm_tt. Think a VRAM resource created to move a + * swapped-out bo to VRAM. + */ + if (bo->resource != res || !bo->ttm) + return false; + + dma_resv_assert_held(bo->base.resv); + return ttm_tt_is_swapped(bo->ttm); +} + /* Add the resource to a bulk move if the BO is configured for it */ void ttm_resource_add_bulk_move(struct ttm_resource *res, struct ttm_buffer_object *bo) { - if (bo->bulk_move && !bo->pin_count) + if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo)) ttm_lru_bulk_move_add(bo->bulk_move, res); } @@ -247,7 +263,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res, void ttm_resource_del_bulk_move(struct ttm_resource *res, struct ttm_buffer_object *bo) { - if (bo->bulk_move && !bo->pin_count) + if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo)) ttm_lru_bulk_move_del(bo->bulk_move, res); } @@ -259,8 +275,8 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res) lockdep_assert_held(&bo->bdev->lru_lock); - if (bo->pin_count) { - list_move_tail(&res->lru.link, &bdev->pinned); + if (bo->pin_count || ttm_resource_is_swapped(res, bo)) { + list_move_tail(&res->lru.link, &bdev->unevictable); } else if (bo->bulk_move) { struct ttm_lru_bulk_move_pos *pos = @@ -301,8 +317,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo, man = ttm_manager_type(bo->bdev, place->mem_type); spin_lock(&bo->bdev->lru_lock); - if (bo->pin_count) - list_add_tail(&res->lru.link, &bo->bdev->pinned); + if (bo->pin_count || ttm_resource_is_swapped(res, bo)) + list_add_tail(&res->lru.link, &bo->bdev->unevictable); else list_add_tail(&res->lru.link, &man->lru[bo->priority]); man->usage += res->size; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 4b51b9023126..3baf215eca23 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -367,7 +367,10 @@ error: } return ret; } + +#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST) EXPORT_SYMBOL(ttm_tt_populate); +#endif void 
ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig index 5121fed571a5..a9d6fe535d88 100644 --- a/drivers/gpu/drm/tve200/Kconfig +++ b/drivers/gpu/drm/tve200/Kconfig @@ -6,6 +6,7 @@ config DRM_TVE200 depends on ARM || COMPILE_TEST depends on OF select DRM_BRIDGE + select DRM_CLIENT_SELECTION select DRM_PANEL_BRIDGE select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index acce210e2554..b30340a2141d 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -39,8 +39,10 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> @@ -149,6 +151,7 @@ static const struct drm_driver tve200_drm_driver = { .minor = 0, .patchlevel = 0, DRM_GEM_DMA_DRIVER_OPS, + DRM_FBDEV_DMA_DRIVER_OPS, }; static int tve200_probe(struct platform_device *pdev) @@ -221,11 +224,7 @@ static int tve200_probe(struct platform_device *pdev) if (ret < 0) goto clk_disable; - /* - * Passing in 16 here will make the RGB565 mode the default - * Passing in 32 will use XRGB8888 mode - */ - drm_fbdev_dma_setup(drm, 16); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index c744175c6992..d7a6abef7d78 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -5,6 +5,7 @@ config DRM_UDL depends on USB depends on USB_ARCH_HAS_HCD depends on MMU + select DRM_CLIENT_SELECTION select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 280a09a6e2ad..8d8ae40f945c 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <drm/drm_drv.h> +#include <drm/drm_client_setup.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> #include <drm/drm_gem_shmem_helper.h> @@ -73,6 +74,7 @@ static const struct drm_driver driver = { .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = udl_driver_gem_prime_import, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -117,7 +119,7 @@ static int udl_usb_probe(struct usb_interface *interface, DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); - drm_fbdev_shmem_setup(&udl->drm, 0); + drm_client_setup(&udl->drm, NULL); return 0; } diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index b7d673f1153b..fcf710926057 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -13,7 +13,8 @@ v3d-y := \ v3d_trace_points.o \ v3d_sched.o \ v3d_sysfs.o \ - v3d_submit.o + v3d_submit.o \ + v3d_gemfs.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index ebe52bef4ffb..73ab7dd31b17 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -107,6 +107,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); struct sg_table *sgt; + u64 align; int ret; /* So far we pin the BO in the MMU for its lifetime, so use @@ -116,6 +117,15 @@ v3d_bo_create_finish(struct 
drm_gem_object *obj) if (IS_ERR(sgt)) return PTR_ERR(sgt); + if (!v3d->gemfs) + align = SZ_4K; + else if (obj->size >= SZ_1M) + align = SZ_1M; + else if (obj->size >= SZ_64K) + align = SZ_64K; + else + align = SZ_4K; + spin_lock(&v3d->mm_lock); /* Allocate the object's space in the GPU's page tables. * Inserting PTEs will happen later, but the offset is for the @@ -123,7 +133,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) */ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, obj->size >> V3D_MMU_PAGE_SHIFT, - GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0); + align >> V3D_MMU_PAGE_SHIFT, 0, 0); spin_unlock(&v3d->mm_lock); if (ret) return ret; @@ -143,10 +153,12 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t unaligned_size) { struct drm_gem_shmem_object *shmem_obj; + struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_bo *bo; int ret; - shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, + v3d->gemfs); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index d7ff1f5fa481..fb35c5c3f1a7 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -36,6 +36,13 @@ #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 +/* Only expose the `super_pages` modparam if THP is enabled. */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool super_pages = true; +module_param_named(super_pages, super_pages, bool, 0400); +MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support."); +#endif + static int v3d_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -97,6 +104,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_MAX_PERF_COUNTERS: args->value = v3d->perfmon_info.max_counters; return 0; + case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES: + args->value = !!v3d->gemfs; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index cf4b23369dc4..de73eefff9ac 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -19,9 +19,8 @@ struct clk; struct platform_device; struct reset_control; -#define GMP_GRANULARITY (128 * 1024) - #define V3D_MMU_PAGE_SHIFT 12 +#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) #define V3D_MAX_QUEUES (V3D_CPU + 1) @@ -137,6 +136,11 @@ struct v3d_dev { struct drm_mm mm; spinlock_t mm_lock; + /* + * tmpfs instance used for shmem backed objects + */ + struct vfsmount *gemfs; + struct work_struct overflow_mem_work; struct v3d_bin_job *bin_job; @@ -534,6 +538,11 @@ void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); +/* v3d_gemfs.c */ +extern bool super_pages; +void v3d_gemfs_init(struct v3d_dev *v3d); +void v3d_gemfs_fini(struct v3d_dev *v3d); + /* v3d_submit.c */ void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); @@ -553,6 +562,7 @@ void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); /* v3d_mmu.c */ +int v3d_mmu_flush_all(struct v3d_dev *v3d); int v3d_mmu_set_page_table(struct v3d_dev *v3d); void v3d_mmu_insert_ptes(struct v3d_bo *bo); void v3d_mmu_remove_ptes(struct v3d_bo *bo); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index da8faf3b9011..b1e681630ded 100644 --- 
a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -288,11 +288,14 @@ v3d_gem_init(struct drm_device *dev) v3d_init_hw_state(v3d); v3d_mmu_set_page_table(v3d); + v3d_gemfs_init(v3d); + ret = v3d_sched_init(v3d); if (ret) { drm_mm_takedown(&v3d->mm); - dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, + dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt, v3d->pt_paddr); + return ret; } return 0; @@ -304,6 +307,7 @@ v3d_gem_destroy(struct drm_device *dev) struct v3d_dev *v3d = to_v3d_dev(dev); v3d_sched_fini(v3d); + v3d_gemfs_fini(v3d); /* Waiting for jobs to finish would need to be done before * unregistering V3D. diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c new file mode 100644 index 000000000000..4c5e18590a5c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2024 Raspberry Pi */ + +#include <linux/fs.h> +#include <linux/mount.h> + +#include "v3d_drv.h" + +void v3d_gemfs_init(struct v3d_dev *v3d) +{ + char huge_opt[] = "huge=within_size"; + struct file_system_type *type; + struct vfsmount *gemfs; + + /* + * By creating our own shmemfs mountpoint, we can pass in + * mount flags that better match our usecase. However, we + * only do so on platforms which benefit from it. + */ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + goto err; + + /* The user doesn't want to enable Super Pages */ + if (!super_pages) + goto err; + + type = get_fs_type("tmpfs"); + if (!type) + goto err; + + gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); + if (IS_ERR(gemfs)) + goto err; + + v3d->gemfs = gemfs; + drm_info(&v3d->drm, "Using Transparent Hugepages\n"); + + return; + +err: + v3d->gemfs = NULL; + drm_notice(&v3d->drm, + "Transparent Hugepage support is recommended for optimal performance on this platform!\n"); +} + +void v3d_gemfs_fini(struct v3d_dev *v3d) +{ + if (v3d->gemfs) + kern_unmount(v3d->gemfs); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index d469bda52c1a..20bf33702c3c 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -70,6 +70,8 @@ v3d_overflow_mem_work(struct work_struct *work) list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); + v3d_mmu_flush_all(v3d); + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT); V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 14f3af40d6f6..0f564fd7160c 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -25,39 +25,37 @@ * superpage bit set. */ #define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_BIGPAGE BIT(30) #define V3D_PTE_WRITEABLE BIT(29) #define V3D_PTE_VALID BIT(28) -static int v3d_mmu_flush_all(struct v3d_dev *v3d) +static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment) { - int ret; - - /* Make sure that another flush isn't already running when we - * start this one. 
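/*
 * Tying the super-pages pieces above together: when the gemfs mount
 * exists, v3d_bo_create() allocates through drm_gem_shmem_create_with_mnt()
 * so tmpfs can hand back huge folios, and v3d_bo_create_finish() raises
 * the GPU-VA alignment so contiguous runs can later be mapped as 1MB super
 * pages or 64KB big pages. A sketch of that alignment policy (hypothetical
 * helper, mirroring the logic shown in v3d_bo_create_finish()):
 */
static u64 v3d_va_alignment(const struct v3d_dev *v3d, size_t size)
{
	if (!v3d->gemfs)	/* no THP-backed mount: huge pages impossible */
		return SZ_4K;
	if (size >= SZ_1M)
		return SZ_1M;	/* eligible for V3D_PTE_SUPERPAGE mappings */
	if (size >= SZ_64K)
		return SZ_64K;	/* eligible for V3D_PTE_BIGPAGE mappings */
	return SZ_4K;
}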
- */ - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); - if (ret) - dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); + return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) && + IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT); +} - V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | - V3D_MMU_CTL_TLB_CLEAR); +int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; - V3D_WRITE(V3D_MMUC_CONTROL, - V3D_MMUC_CONTROL_FLUSH | + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH | V3D_MMUC_CONTROL_ENABLE); - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); if (ret) { - dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); return ret; } - ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & - V3D_MMUC_CONTROL_FLUSHING), 100); + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); if (ret) - dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n"); return ret; } @@ -87,19 +85,40 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct drm_gem_shmem_object *shmem_obj = &bo->base; struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; - u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - struct sg_dma_page_iter dma_iter; - - for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { - dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); - u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; - u32 pte = page_prot | page_address; - u32 i; - - BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= - BIT(24)); - for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) - v3d->pt[page++] = pte + i; + struct scatterlist *sgl; + unsigned int count; + + for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) { + dma_addr_t dma_addr = sg_dma_address(sgl); + u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT; + unsigned int len = sg_dma_len(sgl); + + while (len > 0) { + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + u32 page_address = page_prot | pfn; + unsigned int i, page_size; + + BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24)); + + if (len >= SZ_1M && + v3d_mmu_is_aligned(page, page_address, SZ_1M)) { + page_size = SZ_1M; + page_address |= V3D_PTE_SUPERPAGE; + } else if (len >= SZ_64K && + v3d_mmu_is_aligned(page, page_address, SZ_64K)) { + page_size = SZ_64K; + page_address |= V3D_PTE_BIGPAGE; + } else { + page_size = SZ_4K; + } + + for (i = 0; i < page_size >> V3D_MMU_PAGE_SHIFT; i++) { + v3d->pt[page++] = page_address + i; + pfn++; + } + + len -= page_size; + } } WARN_ON_ONCE(page - bo->node.start != diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 00cd081d7873..156be13ab2ef 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -409,11 +409,7 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, if (req->pad != 0) return -EINVAL; - mutex_lock(&v3d_priv->perfmon.lock); - perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); - v3d_perfmon_get(perfmon); - mutex_unlock(&v3d_priv->perfmon.lock); - + perfmon = v3d_perfmon_find(v3d_priv, req->id); if (!perfmon) return -EINVAL; diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 08d2a2739582..99ac4995b5a1 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ 
b/drivers/gpu/drm/v3d/v3d_sched.c @@ -135,8 +135,31 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_stats *global_stats = &v3d->queue[queue].stats; struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); - - preempt_disable(); + unsigned long flags; + + /* + * We only need to disable local interrupts to appease lockdep who + * otherwise would think v3d_job_start_stats vs v3d_stats_update has an + * unsafe in-irq vs no-irq-off usage problem. This is a false positive + * because all the locks are per queue and stats type, and all jobs are + * completely one at a time serialised. More specifically: + * + * 1. Locks for GPU queues are updated from interrupt handlers under a + * spin lock and started here with preemption disabled. + * + * 2. Locks for CPU queues are updated from the worker with preemption + * disabled and equally started here with preemption disabled. + * + * Therefore both are consistent. + * + * 3. Because next job can only be queued after the previous one has + * been signaled, and locks are per queue, there is also no scope for + * the start part to race with the update part. + */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); write_seqcount_begin(&local_stats->lock); local_stats->start_ns = now; @@ -146,7 +169,10 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) global_stats->start_ns = now; write_seqcount_end(&global_stats->lock); - preempt_enable(); + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); } static void @@ -167,11 +193,21 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_stats *global_stats = &v3d->queue[queue].stats; struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); + unsigned long flags; + + /* See comment in v3d_job_start_stats() */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); - preempt_disable(); v3d_stats_update(local_stats, now); v3d_stats_update(global_stats, now); - preempt_enable(); + + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); } static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) @@ -667,7 +703,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* Unblock schedulers and restart their jobs. 
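/*
 * The stats comment above covers the writer side; the reader side is what
 * makes disabling preemption (or IRQs under lockdep) matter. Consumers
 * retry until the seqcount is stable, so a writer preempted inside the
 * write section would stall them. A reader sketch, assuming struct
 * v3d_stats carries start_ns plus an accumulated enabled_ns guarded by its
 * per-stats seqcount (field names are an assumption for illustration):
 */
static u64 example_stats_active_ns(struct v3d_stats *stats)
{
	unsigned int seq;
	u64 active_ns;

	do {
		seq = read_seqcount_begin(&stats->lock);
		active_ns = stats->enabled_ns;	/* assumed field name */
		if (stats->start_ns)		/* a job is in flight */
			active_ns += local_clock() - stats->start_ns;
	} while (read_seqcount_retry(&stats->lock, seq));

	return active_ns;
}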
*/ for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_start(&v3d->queue[q].sched); + drm_sched_start(&v3d->queue[q].sched, 0); } mutex_unlock(&v3d->reset_lock); diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig index 45fe135d6e43..180e30b82ab9 100644 --- a/drivers/gpu/drm/vboxvideo/Kconfig +++ b/drivers/gpu/drm/vboxvideo/Kconfig @@ -2,6 +2,7 @@ config DRM_VBOXVIDEO tristate "Virtual Box Graphics Card" depends on DRM && X86 && PCI + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index ef36834c8673..a536c467e2b2 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -7,12 +7,14 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ + +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_file.h> @@ -44,7 +46,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; @@ -80,7 +82,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_irq_fini; - drm_fbdev_ttm_setup(&vbox->ddev, 32); + drm_client_setup(&vbox->ddev, NULL); return 0; @@ -193,6 +195,7 @@ static const struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, DRM_GEM_VRAM_DRIVER, + DRM_FBDEV_TTM_DRIVER_OPS, }; drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset); diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 269b5f26b2ea..c5f30b317698 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -9,6 +9,7 @@ config DRM_VC4 depends on SND && SND_SOC depends on COMMON_CLK depends on PM + select DRM_CLIENT_SELECTION select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HDMI_STATE_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c index 0731a7d85d7a..6527fb1db71e 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c @@ -155,11 +155,11 @@ KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister, drm_dev_unregister, struct drm_device *); -static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) +static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen) { struct drm_device *drm; - const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver; - const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock; + const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver; + const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? 
&vc5_mock : &vc4_mock; struct vc4_dev *vc4; struct device *dev; int ret; @@ -173,9 +173,9 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); vc4->dev = dev; - vc4->is_vc5 = is_vc5; + vc4->gen = gen; - vc4->hvs = __vc4_hvs_alloc(vc4, NULL); + vc4->hvs = __vc4_hvs_alloc(vc4, NULL, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs); drm = &vc4->base; @@ -198,10 +198,10 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) struct vc4_dev *vc4_mock_device(struct kunit *test) { - return __mock_device(test, false); + return __mock_device(test, VC4_GEN_4); } struct vc4_dev *vc5_mock_device(struct kunit *test) { - return __mock_device(test, true); + return __mock_device(test, VC4_GEN_5); } diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3f72be7490d5..fb450b6a4d44 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->purgeable.lock); @@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* list_del_init() is used here because the caller might release @@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); bo = kzalloc(sizeof(*bo), GFP_KERNEL); @@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, struct drm_gem_dma_object *dma_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); if (size == 0) @@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *file_priv, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_dumb_fixup_args(args); @@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo) struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Fast path: if the BO is already retained by someone, no need to @@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Fast path: if the BO is still retained by someone, no need to test @@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_grab_bin_bo(vc4, vc4file); @@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; gem_obj = drm_gem_object_lookup(file_priv, args->handle); @@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if 
(WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->size == 0) @@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; bool t_format; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0) @@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0 || args->modifier != 0) @@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device *dev) int ret; int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Create the initial set of BO labels that the kernel will @@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; int ret = 0, label; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!args->len) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 8b5a7e5eb146..575900ee67a5 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -105,6 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, struct vc4_hvs *hvs = vc4->hvs; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int channel = vc4_crtc_state->assigned_channel; unsigned int cob_size; u32 val; int fifo_lines; @@ -121,7 +122,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, * Read vertical scanline which is currently composed for our * pixelvalve by the HVS, and also the scaler status. */ - val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel)); + val = HVS_READ(SCALER_DISPSTATX(channel)); /* Get optional system timestamp after query. */ if (etime) @@ -137,11 +138,11 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, *vpos /= 2; /* Use hpos to correct for field offset in interlaced mode. */ - if (vc4_hvs_get_fifo_frame_count(hvs, vc4_crtc_state->assigned_channel) % 2) + if (vc4_hvs_get_fifo_frame_count(hvs, channel) % 2) *hpos += mode->crtc_htotal / 2; } - cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel); + cob_size = vc4_crtc_get_cob_allocation(vc4, channel); /* This is the offset we need for translating hvs -> pv scanout pos. */ fifo_lines = cob_size / mode->crtc_hdisplay; @@ -263,7 +264,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) * Removing 1 from the FIFO full level however * seems to completely remove that issue. 
*/ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; @@ -428,7 +429,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) CRTC_WRITE(PV_MUX_CFG, VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); @@ -735,10 +736,17 @@ int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (conn_state->crtc != crtc) continue; - vc4_state->margins.left = conn_state->tv.margins.left; - vc4_state->margins.right = conn_state->tv.margins.right; - vc4_state->margins.top = conn_state->tv.margins.top; - vc4_state->margins.bottom = conn_state->tv.margins.bottom; + if (memcmp(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins))) { + memcpy(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins)); + + /* + * Need to force the dlist entries for all planes to be + * updated so that the dest rectangles are changed. + */ + crtc_state->zpos_changed = true; + } break; } @@ -913,7 +921,7 @@ static int vc4_async_set_fence_cb(struct drm_device *dev, struct dma_fence *fence; int ret; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno, @@ -1000,7 +1008,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* @@ -1043,7 +1051,7 @@ int vc4_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - if (vc4->is_vc5) + if (vc4->gen > VC4_GEN_4) return vc5_async_page_flip(crtc, fb, event, flags); else return vc4_async_page_flip(crtc, fb, event, flags); @@ -1338,9 +1346,8 @@ int __vc4_crtc_init(struct drm_device *drm, drm_crtc_helper_add(crtc, crtc_helper_funcs); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); - drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); /* We support CTM, but only for one CRTC at a time. It's therefore diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index c133e96b8aca..d47e5967592f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -20,6 +20,7 @@ * driver. 
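/*
 * A note on the conversion pattern running through these vc4 hunks:
 * replacing the bool is_vc5 with enum vc4_gen lets legacy-only paths
 * reject every newer generation with an ordered comparison
 * (vc4->gen > VC4_GEN_4), while generation-specific programming matches
 * exactly, so the code stays correct if further enum values are appended.
 * Schematically:
 */
switch (vc4->gen) {
case VC4_GEN_4:
	/* paths formerly guarded by !is_vc5 */
	break;
case VC4_GEN_5:
	/* paths formerly guarded by is_vc5 */
	break;
default:
	/* future generations fail safe here instead of silently
	 * falling into one of the old bool branches
	 */
	return -EPIPE;
}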
*/ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/device.h> @@ -30,10 +31,11 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> @@ -98,7 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) @@ -147,7 +149,7 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL); @@ -165,7 +167,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file->driver_priv; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (vc4file->bin_bo_used) @@ -212,6 +214,7 @@ const struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .ioctls = vc4_drm_ioctls, .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls), @@ -235,6 +238,7 @@ const struct drm_driver vc5_drm_driver = { #endif DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &vc4_drm_fops, @@ -291,13 +295,17 @@ static int vc4_drm_bind(struct device *dev) struct vc4_dev *vc4; struct device_node *node; struct drm_crtc *crtc; - bool is_vc5; + enum vc4_gen gen; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); - is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"); - if (is_vc5) + if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5")) + gen = VC4_GEN_5; + else + gen = VC4_GEN_4; + + if (gen > VC4_GEN_4) driver = &vc5_drm_driver; else driver = &vc4_drm_driver; @@ -315,13 +323,13 @@ static int vc4_drm_bind(struct device *dev) vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); - vc4->is_vc5 = is_vc5; + vc4->gen = gen; vc4->dev = dev; drm = &vc4->base; platform_set_drvdata(pdev, drm); - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = drmm_mutex_init(drm, &vc4->bin_bo_lock); if (ret) goto err; @@ -335,7 +343,7 @@ static int vc4_drm_bind(struct device *dev) if (ret) goto err; - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = vc4_gem_init(drm); if (ret) goto err; @@ -352,7 +360,7 @@ static int vc4_drm_bind(struct device *dev) } } - ret = drm_aperture_remove_framebuffers(driver); + ret = aperture_remove_all_conflicting_devices(driver->name); if (ret) goto err; @@ -389,7 +397,7 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto err; - drm_fbdev_dma_setup(drm, 16); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); return 0; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 08e29fa82563..c6be1997f1c7 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -15,6 +15,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_encoder.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mm.h> @@ 
-80,11 +81,16 @@ struct vc4_perfmon { u64 counters[] __counted_by(ncounters); }; +enum vc4_gen { + VC4_GEN_4, + VC4_GEN_5, +}; + struct vc4_dev { struct drm_device base; struct device *dev; - bool is_vc5; + enum vc4_gen gen; unsigned int irq; @@ -315,6 +321,7 @@ struct vc4_hvs { struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; + unsigned int dlist_mem_size; struct clk *core_clk; @@ -394,7 +401,7 @@ struct vc4_plane_state { */ u32 pos0_offset; u32 pos2_offset; - u32 ptr0_offset; + u32 ptr0_offset[DRM_FORMAT_MAX_PLANES]; u32 lbm_offset; /* Offset where the plane's dlist was last stored in the @@ -404,7 +411,7 @@ struct vc4_plane_state { /* Clipped coordinates of the plane on the display. */ int crtc_x, crtc_y, crtc_w, crtc_h; - /* Clipped area being scanned from in the FB. */ + /* Clipped area being scanned from in the FB in u16.16 format */ u32 src_x, src_y; u32 src_w[2], src_h[2]; @@ -414,11 +421,6 @@ struct vc4_plane_state { bool is_unity; bool is_yuv; - /* Offset to start scanning out from the start of the plane's - * BO. - */ - u32 offsets[3]; - /* Our allocation in LBM for temporary storage during scaling. */ struct drm_mm_node lbm; @@ -598,12 +600,7 @@ struct vc4_crtc_state { bool txp_armed; unsigned int assigned_channel; - struct { - unsigned int left; - unsigned int right; - unsigned int top; - unsigned int bottom; - } margins; + struct drm_connector_tv_margins margins; unsigned long hvs_load; @@ -1002,7 +999,9 @@ void vc4_irq_reset(struct drm_device *dev); /* vc4_hvs.c */ extern struct platform_driver vc4_hvs_driver; -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev); +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev); void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output); int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output); u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 24fb1b57e1dd..22bccd69eb62 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, u32 i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, unsigned long timeout_expire; DEFINE_WAIT(wait); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (vc4->finished_seqno >= seqno) @@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; again: @@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_device *dev) if (!exec) return; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* A previous RCL may have written to one of our textures, and @@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; list_move_tail(&exec->head, &vc4->render_job_list); @@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4) unsigned long irqflags; struct vc4_seqno_cb *cb, 
*cb_temp; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; spin_lock_irqsave(&vc4->job_lock, irqflags); @@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; cb->func = func; @@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, @@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->pad != 0) @@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, args->shader_rec_size, args->bo_handle_count); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -1267,7 +1267,7 @@ int vc4_gem_init(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4->dma_fence_context = dma_fence_context_alloc(1); @@ -1326,7 +1326,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; switch (args->madv) { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 6611ab7c26a6..62b82b1eeb36 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -147,6 +147,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) if (!drm_dev_enter(drm, &idx)) return -ENODEV; + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); drm_print_regset32(&p, &vc4_hdmi->hd_regset); drm_print_regset32(&p, &vc4_hdmi->cec_regset); @@ -156,6 +158,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) drm_print_regset32(&p, &vc4_hdmi->ram_regset); drm_print_regset32(&p, &vc4_hdmi->rm_regset); + pm_runtime_put(&vc4_hdmi->pdev->dev); + drm_dev_exit(idx); return 0; @@ -1594,6 +1598,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_UNDERFLOW_ENABLE | VC4_HD_VID_CTL_FRAME_COUNTER_RESET | + VC4_HD_VID_CTL_BLANK_INSERT_EN | (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | (hsync_pos ? 
0 : VC4_HD_VID_CTL_HSYNC_LOW)); @@ -1920,7 +1925,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data) } if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { - ret = -ENODEV; + ret = -ENOTSUPP; goto out_dev_exit; } @@ -2047,6 +2052,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_device *drm = vc4_hdmi->connector.dev; struct drm_connector *connector = &vc4_hdmi->connector; + struct vc4_dev *vc4 = to_vc4_dev(drm); unsigned int sample_rate = params->sample_rate; unsigned int channels = params->channels; unsigned long flags; @@ -2104,11 +2110,18 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, VC4_HDMI_AUDIO_PACKET_CEA_MASK); /* Set the MAI threshold */ - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW)); + if (vc4->gen >= VC4_GEN_5) + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW)); + else + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW)); HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index b04b2fc8d831..68455ce513e7 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -498,8 +498,11 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, field = &variant->registers[reg]; base = __vc4_hdmi_get_field_base(hdmi, field->reg); - if (!base) + if (!base) { + dev_warn(&hdmi->pdev->dev, + "Unknown register ID %u\n", reg); return; + } writel(value, base + field->offset); } diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2a835a5cff9d..1edf6e3fa7e6 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -33,7 +33,7 @@ #include "vc4_drv.h" #include "vc4_regs.h" -static const struct debugfs_reg32 hvs_regs[] = { +static const struct debugfs_reg32 vc4_hvs_regs[] = { VC4_REG32(SCALER_DISPCTRL), VC4_REG32(SCALER_DISPSTAT), VC4_REG32(SCALER_DISPID), @@ -110,7 +110,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; struct drm_printer p = drm_seq_file_printer(m); - unsigned int next_entry_start = 0; + unsigned int dlist_mem_size = hvs->dlist_mem_size; + unsigned int next_entry_start; unsigned int i, j; u32 dlist_word, dispstat; @@ -124,8 +125,9 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) } drm_printf(&p, "HVS chan %u:\n", i); + next_entry_start = 0; - for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) { + for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) { dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j); drm_printf(&p, "dlist: %02d: 0x%08x\n", j, dlist_word); @@ -222,6 +224,9 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, if (!drm_dev_enter(drm, &idx)) return; + if (hvs->vc4->gen != VC4_GEN_4) + goto exit; + /* The LUT memory is laid out with each HVS channel in order, * each of which takes 256 writes for R, 256 for G, then 256 * for B. 
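[Editor's note] The gamma LUT layout described just above (one 3 x 256 window per channel: 256 writes for R, then G, then B) is easiest to see as the upload loop itself. A minimal sketch in C, closely following the driver's existing vc4_hvs_lut_load() and assuming the auto-incrementing SCALER_GAMADDR/SCALER_GAMDATA register pair declared in vc4_regs.h:

/* Illustrative sketch only; not part of this patch. */
static void hvs_lut_load_sketch(struct vc4_hvs *hvs,
				struct vc4_crtc *vc4_crtc,
				unsigned int channel)
{
	u32 i;

	/* Point the auto-incrementing address register at this
	 * channel's 3 * 256 entry window in the LUT memory.
	 */
	HVS_WRITE(SCALER_GAMADDR, SCALER_GAMADDR_AUTOINC |
		  (channel * 3 * 256));

	/* 256 writes for R, then 256 for G, then 256 for B. */
	for (i = 0; i < 256; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < 256; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < 256; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
}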
@@ -237,6 +242,7 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, for (i = 0; i < crtc->gamma_size; i++) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); +exit: drm_dev_exit(idx); } @@ -291,53 +297,60 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output) u32 reg; int ret; - if (!vc4->is_vc5) + switch (vc4->gen) { + case VC4_GEN_4: return output; - /* - * NOTE: We should probably use drm_dev_enter()/drm_dev_exit() - * here, but this function is only used during the DRM device - * initialization, so we should be fine. - */ + case VC4_GEN_5: + /* + * NOTE: We should probably use + * drm_dev_enter()/drm_dev_exit() here, but this + * function is only used during the DRM device + * initialization, so we should be fine. + */ - switch (output) { - case 0: - return 0; + switch (output) { + case 0: + return 0; - case 1: - return 1; + case 1: + return 1; - case 2: - reg = HVS_READ(SCALER_DISPECTRL); - ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); - if (ret == 0) - return 2; + case 2: + reg = HVS_READ(SCALER_DISPECTRL); + ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); + if (ret == 0) + return 2; - return 0; + return 0; - case 3: - reg = HVS_READ(SCALER_DISPCTRL); - ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 3: + reg = HVS_READ(SCALER_DISPCTRL); + ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 4: - reg = HVS_READ(SCALER_DISPEOLN); - ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 4: + reg = HVS_READ(SCALER_DISPEOLN); + ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 5: - reg = HVS_READ(SCALER_DISPDITHER); - ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 5: + reg = HVS_READ(SCALER_DISPDITHER); + ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; + + default: + return -EPIPE; + } default: return -EPIPE; @@ -372,7 +385,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispctrl = SCALER_DISPCTRLX_ENABLE; dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, @@ -394,7 +407,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | - ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | + ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if @@ -415,13 +428,11 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan) if (!drm_dev_enter(drm, &idx)) return; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE) + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) goto out; - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); /* Once we leave, the scaler should be disabled and its fifo empty. 
*/ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); @@ -456,17 +467,29 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) if (hweight32(crtc_state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) - dlist_count += vc4_plane_dlist_size(plane_state); + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + u32 plane_dlist_count = vc4_plane_dlist_size(plane_state); + + drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name, + plane_dlist_count); + + dlist_count += plane_dlist_count; + } dlist_count++; /* Account for SCALER_CTL0_END. */ + drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n", + crtc->base.id, crtc->name, dlist_count); spin_lock_irqsave(&vc4->hvs->mm_lock, flags); ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, dlist_count); spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); - if (ret) + if (ret) { + drm_err(dev, "Failed to allocate DLIST entry: %d\n", ret); return ret; + } return 0; } @@ -580,7 +603,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, } if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) - return; + goto exit; if (debug_dump_regs) { DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc)); @@ -663,12 +686,14 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, vc4_hvs_dump_state(hvs); } +exit: drm_dev_exit(idx); } void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -676,8 +701,9 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl &= ~((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPCTRL, dispctrl); @@ -686,7 +712,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -694,8 +721,9 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl |= ((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_EUFLOW(channel)); @@ -738,8 +766,10 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) control = HVS_READ(SCALER_DISPCTRL); for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { - dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel); + dspeislur = (vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel); + /* Interrupt masking is not always honored, so check it here. 
*/ if (status & SCALER_DISPSTAT_EUFLOW(channel) && control & dspeislur) { @@ -767,7 +797,7 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) if (!vc4->hvs) return -ENODEV; - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, minor->debugfs_root, &vc4->load_tracker_enabled); @@ -781,7 +811,9 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) return 0; } -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev) +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev) { struct drm_device *drm = &vc4->base; struct vc4_hvs *hvs; @@ -791,6 +823,7 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return ERR_PTR(-ENOMEM); hvs->vc4 = vc4; + hvs->regs = regs; hvs->pdev = pdev; spin_lock_init(&hvs->mm_lock); @@ -800,16 +833,17 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde * our 16K), since we don't want to scramble the screen when * transitioning from the firmware's boot setup to runtime. */ + hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END; drm_mm_init(&hvs->dlist_mm, HVS_BOOTLOADER_DLIST_END, - (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); + hvs->dlist_mem_size); /* Set up the HVS LBM memory manager. We could have some more * complicated data structure that allowed reuse of LBM areas * between planes when they don't overlap on the screen, but * for now we just allocate globally. */ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else @@ -821,79 +855,14 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return hvs; } -static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +static int vc4_hvs_hw_init(struct vc4_hvs *hvs) { - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); - struct vc4_hvs *hvs = NULL; - int ret; - u32 dispctrl; - u32 reg, top; - - hvs = __vc4_hvs_alloc(vc4, NULL); - if (IS_ERR(hvs)) - return PTR_ERR(hvs); - - hvs->regs = vc4_ioremap_regs(pdev, 0); - if (IS_ERR(hvs->regs)) - return PTR_ERR(hvs->regs); - - hvs->regset.base = hvs->regs; - hvs->regset.regs = hvs_regs; - hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - - if (vc4->is_vc5) { - struct rpi_firmware *firmware; - struct device_node *node; - unsigned int max_rate; - - node = rpi_firmware_find_node(); - if (!node) - return -EINVAL; - - firmware = rpi_firmware_get(node); - of_node_put(node); - if (!firmware) - return -EPROBE_DEFER; - - hvs->core_clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(hvs->core_clk)) { - dev_err(&pdev->dev, "Couldn't get core clock\n"); - return PTR_ERR(hvs->core_clk); - } - - max_rate = rpi_firmware_clk_get_max_rate(firmware, - RPI_FIRMWARE_CORE_CLK_ID); - rpi_firmware_put(firmware); - if (max_rate >= 550000000) - hvs->vc5_hdmi_enable_hdmi_20 = true; - - if (max_rate >= 600000000) - hvs->vc5_hdmi_enable_4096by2160 = true; - - hvs->max_core_rate = max_rate; - - ret = clk_prepare_enable(hvs->core_clk); - if (ret) { - dev_err(&pdev->dev, "Couldn't enable the core clock\n"); - return ret; - } - } - - if (!vc4->is_vc5) - hvs->dlist = hvs->regs + SCALER_DLIST_START; - else - hvs->dlist = hvs->regs + SCALER5_DLIST_START; + struct vc4_dev *vc4 = hvs->vc4; + u32 dispctrl, reg; - /* Upload filter kernels. 
We only have the one for now, so we - * keep it around for the lifetime of the driver. - */ - ret = vc4_hvs_upload_linear_kernel(hvs, - &hvs->mitchell_netravali_filter, - mitchell_netravali_1_3_1_3_kernel); - if (ret) - return ret; + dispctrl = HVS_READ(SCALER_DISPCTRL); + dispctrl |= SCALER_DISPCTRL_ENABLE; + HVS_WRITE(SCALER_DISPCTRL, dispctrl); reg = HVS_READ(SCALER_DISPECTRL); reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; @@ -916,13 +885,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); dispctrl = HVS_READ(SCALER_DISPCTRL); - - dispctrl |= SCALER_DISPCTRL_ENABLE; dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | @@ -962,11 +929,33 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + /* Set AXI panic mode. + * VC4 panics when < 2 lines in FIFO. + * VC5 panics when less than 1 line in the FIFO. + */ + dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | + SCALER_DISPCTRL_PANIC1_MASK | + SCALER_DISPCTRL_PANIC2_MASK); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); - /* Recompute Composite Output Buffer (COB) allocations for the displays + return 0; +} + +static int vc4_hvs_cob_init(struct vc4_hvs *hvs) +{ + struct vc4_dev *vc4 = hvs->vc4; + u32 reg, top; + + /* + * Recompute Composite Output Buffer (COB) allocations for the + * displays */ - if (!vc4->is_vc5) { + switch (vc4->gen) { + case VC4_GEN_4: /* The COB is 20736 pixels, or just over 10 lines at 2048 wide. * The bottom 2048 pixels are full 32bpp RGBA (intended for the * TXP composing RGBA to memory), whilst the remainder are only @@ -990,7 +979,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC4_COB_SIZE; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE0, reg); - } else { + break; + + case VC4_GEN_5: /* The COB is 44416 pixels, or 10.8 lines at 4096 wide. 
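 * [Editor's note, not part of this patch: as a quick check of these figures,
 *  44416 / 4096 is roughly 10.84, hence the "10.8 lines" here, and the VC4
 *  case above is 20736 / 2048 = 10.125, hence "just over 10 lines".]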
* The bottom 4096 pixels are full RGBA (intended for the TXP * composing RGBA to memory), whilst the remainder are only @@ -1016,8 +1007,96 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC5_COB_SIZE; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE0, reg); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hvs *hvs = NULL; + void __iomem *regs; + int ret; + + regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + hvs = __vc4_hvs_alloc(vc4, regs, pdev); + if (IS_ERR(hvs)) + return PTR_ERR(hvs); + + hvs->regset.base = hvs->regs; + hvs->regset.regs = vc4_hvs_regs; + hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs); + + if (vc4->gen == VC4_GEN_5) { + struct rpi_firmware *firmware; + struct device_node *node; + unsigned int max_rate; + + node = rpi_firmware_find_node(); + if (!node) + return -EINVAL; + + firmware = rpi_firmware_get(node); + of_node_put(node); + if (!firmware) + return -EPROBE_DEFER; + + hvs->core_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hvs->core_clk)) { + dev_err(&pdev->dev, "Couldn't get core clock\n"); + return PTR_ERR(hvs->core_clk); + } + + max_rate = rpi_firmware_clk_get_max_rate(firmware, + RPI_FIRMWARE_CORE_CLK_ID); + rpi_firmware_put(firmware); + if (max_rate >= 550000000) + hvs->vc5_hdmi_enable_hdmi_20 = true; + + if (max_rate >= 600000000) + hvs->vc5_hdmi_enable_4096by2160 = true; + + hvs->max_core_rate = max_rate; + + ret = clk_prepare_enable(hvs->core_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the core clock\n"); + return ret; + } } + if (vc4->gen == VC4_GEN_4) + hvs->dlist = hvs->regs + SCALER_DLIST_START; + else + hvs->dlist = hvs->regs + SCALER5_DLIST_START; + + ret = vc4_hvs_hw_init(hvs); + if (ret) + return ret; + + /* Upload filter kernels. We only have the one for now, so we + * keep it around for the lifetime of the driver. 
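 * [Editor's note, not part of this patch: the one kernel in question is
 *  mitchell_netravali_1_3_1_3_kernel, a Mitchell-Netravali cubic whose name
 *  encodes the B = 1/3, C = 1/3 parameters; it is uploaded once here and
 *  reused for every scaled plane.]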
+ */ + ret = vc4_hvs_upload_linear_kernel(hvs, + &hvs->mitchell_netravali_filter, + mitchell_netravali_1_3_1_3_kernel); + if (ret) + return ret; + + ret = vc4_hvs_cob_init(hvs); + if (ret) + return ret; + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_hvs_irq_handler, 0, "vc4 hvs", drm); if (ret) diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index ef93d8e22a35..69b399f3b802 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *dev, int irq) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (irq == IRQ_NOTCONNECTED) @@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; vc4_irq_disable(dev); @@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Acknowledge any stale IRQs. */ diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 5495f2a94fa9..58bbb9efc2df 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -369,7 +369,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) old_hvs_state->fifo_state[channel].pending_commit = NULL; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long state_rate = max(old_hvs_state->core_clock_rate, new_hvs_state->core_clock_rate); unsigned long core_rate = clamp_t(unsigned long, state_rate, @@ -388,7 +388,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) vc5_hvs_pv_muxing_commit(vc4, state); else vc4_hvs_pv_muxing_commit(vc4, state); @@ -406,7 +406,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long core_rate = min_t(unsigned long, hvs->max_core_rate, new_hvs_state->core_clock_rate); @@ -461,7 +461,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_mode_fb_cmd2 mode_cmd_local; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); /* If the user didn't specify a modifier, use the @@ -1040,7 +1040,7 @@ int vc4_kms_load(struct drm_device *dev) * the BCM2711, but the load tracker computations are used for * the core clock rate calculation. */ - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. 
*/ @@ -1056,7 +1056,7 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { dev->mode_config.max_width = 7680; dev->mode_config.max_height = 7680; } else { @@ -1064,7 +1064,7 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.max_height = 2048; } - dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs; + dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs; dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c00a5cc2316d..f1342f917cf7 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; refcount_inc(&perfmon->refcnt); @@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (refcount_dec_and_test(&perfmon->refcnt)) @@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon) unsigned int i; u32 mask; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon)) @@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, { unsigned int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!vc4->active_perfmon || @@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) struct vc4_dev *vc4 = vc4file->dev; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; mutex_lock(&vc4file->perfmon.lock); @@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_init(&vc4file->perfmon.lock); @@ -131,7 +131,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4file->perfmon.lock); @@ -151,7 +151,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, unsigned int i; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -205,7 +205,7 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vc4_perfmon_destroy *req = data; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -233,7 +233,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -241,11 +241,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, return -ENODEV; } - mutex_lock(&vc4file->perfmon.lock); - perfmon = idr_find(&vc4file->perfmon.idr, req->id); - vc4_perfmon_get(perfmon); - mutex_unlock(&vc4file->perfmon.lock); - + perfmon = vc4_perfmon_find(vc4file, req->id); if (!perfmon) return -EINVAL; diff --git 
a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 07caf2a47c6c..ba6e86d62a77 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -110,6 +110,18 @@ static const struct hvs_format { .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, }, { + .drm = DRM_FORMAT_YUV444, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCBCR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR, + }, + { + .drm = DRM_FORMAT_YVU444, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCRCB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, + }, + { .drm = DRM_FORMAT_YUV420, .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE, .pixel_order = HVS_PIXEL_ORDER_XYCBCR, @@ -251,9 +263,9 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) { - if (dst == src) + if (dst == src >> 16) return VC4_SCALING_NONE; - if (3 * dst >= 2 * src) + if (3 * dst >= 2 * (src >> 16)) return VC4_SCALING_PPF; else return VC4_SCALING_TPZ; @@ -438,12 +450,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; - struct drm_gem_dma_object *bo; int num_planes = fb->format->num_planes; struct drm_crtc_state *crtc_state; u32 h_subsample = fb->format->hsub; u32 v_subsample = fb->format->vsub; - int i, ret; + int ret; crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); @@ -457,20 +468,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) if (ret) return ret; - for (i = 0; i < num_planes; i++) { - bo = drm_fb_dma_get_gem_obj(fb, i); - vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i]; - } - - /* - * We don't support subpixel source positioning for scaling, - * but fractional coordinates can be generated by clipping - * so just round for now - */ - vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16); - vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16); - vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x; - vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y; + vc4_state->src_x = state->src.x1; + vc4_state->src_y = state->src.y1; + vc4_state->src_w[0] = state->src.x2 - vc4_state->src_x; + vc4_state->src_h[0] = state->src.y2 - vc4_state->src_y; vc4_state->crtc_x = state->dst.x1; vc4_state->crtc_y = state->dst.y1; @@ -510,6 +511,12 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) */ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE) vc4_state->x_scaling[1] = VC4_SCALING_PPF; + + /* Similarly UV needs vertical scaling to be enabled. + * Without this a 1:1 scaled YUV422 plane isn't rendered. + */ + if (vc4_state->y_scaling[1] == VC4_SCALING_NONE) + vc4_state->y_scaling[1] = VC4_SCALING_PPF; } else { vc4_state->is_yuv = false; vc4_state->x_scaling[1] = VC4_SCALING_NONE; @@ -523,7 +530,7 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) { u32 scale, recip; - scale = (1 << 16) * src / dst; + scale = src / dst; /* The specs note that while the reciprocal would be defined * as (1<<32)/scale, ~0 is close enough. 
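[Editor's note] With source coordinates now carried end to end in u16.16 fixed point (note the src >> 16 comparisons in the hunk above), the TPZ scale factor becomes a plain integer divide. A hedged sketch of the arithmetic, as standalone C with hypothetical helper names:

#include <stdint.h>

/* src_1616 is u16.16 fixed point, dst is whole pixels; the quotient is
 * the u16.16 step the TPZ scaler advances per output pixel.
 */
static uint32_t tpz_scale_sketch(uint32_t src_1616, uint32_t dst)
{
	return src_1616 / dst;
}

/* Nominally (1 << 32) / scale; as the comment above notes, ~0 is close
 * enough for the reciprocal field.
 */
static uint32_t tpz_recip_sketch(uint32_t scale)
{
	return ~0u / scale;
}

/* Example: a 1920-pixel-wide source shown 1280 pixels wide gives
 * scale = (1920 << 16) / 1280 = 0x18000, i.e. 1.5 source pixels
 * advanced per output pixel.
 */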
@@ -537,14 +544,61 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
 		     VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
 }
 
-static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+/* phase magnitude bits */
+#define PHASE_BITS 6
+
+static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
+			  u32 xy, int channel)
 {
-	u32 scale = (1 << 16) * src / dst;
+	u32 scale = src / dst;
+	s32 offset, offset2;
+	s32 phase;
+
+	/*
+	 * Start the phase at 1/2 pixel from the 1st pixel at src_x.
+	 * 1/4 pixel for YUV.
+	 */
+	if (channel) {
+		/*
+		 * The phase is relative to scale_src->x, so shift it for
+		 * display list's x value
+		 */
+		offset = (xy & 0x1ffff) >> (16 - PHASE_BITS) >> 1;
+		offset += -(1 << PHASE_BITS >> 2);
+	} else {
+		/*
+		 * The phase is relative to scale_src->x, so shift it for
+		 * display list's x value
+		 */
+		offset = (xy & 0xffff) >> (16 - PHASE_BITS);
+		offset += -(1 << PHASE_BITS >> 1);
+
+		/*
+		 * This is a kludge to make sure the scaling factors are
+		 * consistent with YUV's luma scaling. We lose 1-bit precision
+		 * because of this.
+		 */
+		scale &= ~1;
+	}
+
+	/*
+	 * There may also be a small error introduced by the precision of
+	 * scale. Add half of that as a compromise.
+	 */
+	offset2 = src - dst * scale;
+	offset2 >>= 16 - PHASE_BITS;
+	phase = offset + (offset2 >> 1);
+
+	/* Ensure +ve values don't touch the sign bit, then truncate negative values */
+	if (phase >= 1 << PHASE_BITS)
+		phase = (1 << PHASE_BITS) - 1;
+
+	phase &= SCALER_PPF_IPHASE_MASK;
 
 	vc4_dlist_write(vc4_state,
 			SCALER_PPF_AGC |
 			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
-			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
+			VC4_SET_FIELD(phase, SCALER_PPF_IPHASE));
 }
 
 static u32 vc4_lbm_size(struct drm_plane_state *state)
@@ -569,7 +623,7 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
 	if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
 		pix_per_line = vc4_state->crtc_w;
 	else
-		pix_per_line = vc4_state->src_w[0];
+		pix_per_line = vc4_state->src_w[0] >> 16;
 
 	if (!vc4_state->is_yuv) {
 		if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
@@ -587,10 +641,10 @@
 	}
 
 	/* Align it to 64 or 128 (hvs5) bytes */
-	lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
+	lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64);
 
 	/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
-	lbm /= vc4->is_vc5 ? 4 : 2;
+	lbm /= vc4->gen == VC4_GEN_5 ?
4 : 2; return lbm; } @@ -602,27 +656,27 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, /* Ch0 H-PPF Word 0: Scaling Parameters */ if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_ppf(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w, vc4_state->src_x, channel); } /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_ppf(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h, vc4_state->src_y, channel); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */ if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_tpz(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w); } /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_tpz(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } } @@ -660,7 +714,8 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) for (i = 0; i < fb->format->num_planes; i++) { /* Even if the bandwidth/plane required for a single frame is * - * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh + * (vc4_state->src_w[i] >> 16) * (vc4_state->src_h[i] >> 16) * + * cpp * vrefresh * * when downscaling, we have to read more pixels per line in * the time frame reserved for a single line, so the bandwidth @@ -669,11 +724,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) * load by this number. We're likely over-estimating the read * demand, but that's better than under-estimating it. */ - vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i], + vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i] >> 16, vc4_state->crtc_h); - vc4_state->membus_load += vc4_state->src_w[i] * - vc4_state->src_h[i] * vscale_factor * - fb->format->cpp[i]; + vc4_state->membus_load += (vc4_state->src_w[i] >> 16) * + (vc4_state->src_h[i] >> 16) * + vscale_factor * fb->format->cpp[i]; vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w; } @@ -684,7 +739,9 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) static int vc4_plane_allocate_lbm(struct drm_plane_state *state) { - struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); + struct drm_device *drm = state->plane->dev; + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct drm_plane *plane = state->plane; struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); unsigned long irqflags; u32 lbm_size; @@ -693,6 +750,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) if (!lbm_size) return 0; + if (vc4->gen == VC4_GEN_5) + lbm_size = ALIGN(lbm_size, 64); + else if (vc4->gen == VC4_GEN_4) + lbm_size = ALIGN(lbm_size, 32); + + drm_dbg_driver(drm, "[PLANE:%d:%s] LBM Allocation Size: %u\n", + plane->base.id, plane->name, lbm_size); + if (WARN_ON(!vc4_state->lbm_offset)) return -EINVAL; @@ -705,13 +770,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, - lbm_size, - vc4->is_vc5 ? 
64 : 32, + lbm_size, 1, 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); - if (ret) + if (ret) { + drm_err(drm, "Failed to allocate LBM entry: %d\n", ret); return ret; + } } else { WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); } @@ -826,9 +892,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, bool mix_plane_alpha; bool covers_screen; u32 scl0, scl1, pitch0; - u32 tiling, src_y; + u32 tiling, src_x, src_y; + u32 width, height; u32 hvs_format = format->hvs; unsigned int rotation; + u32 offsets[3] = { 0 }; int ret, i; if (vc4_state->dlist_initialized) @@ -838,6 +906,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (ret) return ret; + width = vc4_state->src_w[0] >> 16; + height = vc4_state->src_h[0] >> 16; + /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB * and 4:4:4, scl1 should be set to scl0 so both channels of * the scaler do the same thing. For YUV, the Y plane needs @@ -858,9 +929,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, DRM_MODE_REFLECT_Y); /* We must point to the last line when Y reflection is enabled. */ - src_y = vc4_state->src_y; + src_y = vc4_state->src_y >> 16; if (rotation & DRM_MODE_REFLECT_Y) - src_y += vc4_state->src_h[0] - 1; + src_y += height - 1; + + src_x = vc4_state->src_x >> 16; switch (base_format_mod) { case DRM_FORMAT_MOD_LINEAR: @@ -871,13 +944,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * out. */ for (i = 0; i < num_planes; i++) { - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - fb->pitches[i]; - - vc4_state->offsets[i] += vc4_state->src_x / - (i ? h_subsample : 1) * - fb->format->cpp[i]; + offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i]; + offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i]; } break; @@ -898,7 +966,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * pitch * tile_h == tile_size * tiles_per_row */ u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift); - u32 tiles_l = vc4_state->src_x >> tile_w_shift; + u32 tiles_l = src_x >> tile_w_shift; u32 tiles_r = tiles_w - tiles_l; u32 tiles_t = src_y >> tile_h_shift; /* Intra-tile offsets, which modify the base address (the @@ -908,7 +976,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, u32 tile_y = (src_y >> 4) & 1; u32 subtile_y = (src_y >> 2) & 3; u32 utile_y = src_y & 3; - u32 x_off = vc4_state->src_x & tile_w_mask; + u32 x_off = src_x & tile_w_mask; u32 y_off = src_y & tile_h_mask; /* When Y reflection is requested we must set the @@ -932,19 +1000,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane, VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) | VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) | VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R)); - vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift); - vc4_state->offsets[0] += subtile_y << 8; - vc4_state->offsets[0] += utile_y << 4; + offsets[0] += tiles_t * (tiles_w << tile_size_shift); + offsets[0] += subtile_y << 8; + offsets[0] += utile_y << 4; /* Rows of tiles alternate left-to-right and right-to-left. 
*/ if (tiles_t & 1) { pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR; - vc4_state->offsets[0] += (tiles_w - tiles_l) << - tile_size_shift; - vc4_state->offsets[0] -= (1 + !tile_y) << 10; + offsets[0] += (tiles_w - tiles_l) << tile_size_shift; + offsets[0] -= (1 + !tile_y) << 10; } else { - vc4_state->offsets[0] += tiles_l << tile_size_shift; - vc4_state->offsets[0] += tile_y << 10; + offsets[0] += tiles_l << tile_size_shift; + offsets[0] += tile_y << 10; } break; @@ -1004,7 +1071,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * of the 12-pixels in that 128-bit word is the * first pixel to be used */ - u32 remaining_pixels = vc4_state->src_x % 96; + u32 remaining_pixels = src_x % 96; u32 aligned = remaining_pixels / 12; u32 last_bits = remaining_pixels % 12; @@ -1026,18 +1093,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } pix_per_tile = tile_w / fb->format->cpp[0]; - x_off = (vc4_state->src_x % pix_per_tile) / + x_off = (src_x % pix_per_tile) / (i ? h_subsample : 1) * fb->format->cpp[i]; } - tile = vc4_state->src_x / pix_per_tile; + tile = src_x / pix_per_tile; - vc4_state->offsets[i] += param * tile_w * tile; - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - tile_w; - vc4_state->offsets[i] += x_off & ~(i ? 1 : 0); + offsets[i] += param * tile_w * tile; + offsets[i] += src_y / (i ? v_subsample : 1) * tile_w; + offsets[i] += x_off & ~(i ? 1 : 0); } pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); @@ -1050,6 +1115,30 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } + /* fetch an extra pixel if we don't actually line up with the left edge. */ + if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16)) + width++; + + /* same for the right side */ + if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) && + vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16)) + width++; + + /* now for the top */ + if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16)) + height++; + + /* and the bottom */ + if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) && + vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16)) + height++; + + /* For YUV444 the hardware wants double the width, otherwise it doesn't + * fetch full width of chroma + */ + if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444) + width <<= 1; + /* Don't waste cycles mixing with plane alpha if the set alpha * is opaque or there is no per-pixel alpha information. * In any case we use the alpha property value as the fixed alpha. @@ -1057,7 +1146,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | @@ -1092,10 +1181,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | vc4_hvs4_get_alpha_blend_mode(state) | - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. 
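 * [Editor's note, not part of this patch: as a worked example of the edge-fetch
 *  logic earlier in this hunk, src_x = 10.5 (0xa8000 in u16.16) with
 *  src_w[0] = 100.0 spans source columns 10.5 through 110.5, so the left and
 *  right tests each add a pixel and the HVS fetches 102 columns, not 100.]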
*/ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1148,10 +1235,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Position Word 2: Source Image Size */ vc4_state->pos2_offset = vc4_state->dlist_count; vc4_dlist_write(vc4_state, - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER5_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER5_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER5_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER5_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. */ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1162,9 +1247,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * * The pointers may be any byte address. */ - vc4_state->ptr0_offset = vc4_state->dlist_count; - for (i = 0; i < num_planes; i++) - vc4_dlist_write(vc4_state, vc4_state->offsets[i]); + vc4_state->ptr0_offset[0] = vc4_state->dlist_count; + + for (i = 0; i < num_planes; i++) { + struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i); + + vc4_dlist_write(vc4_state, bo->dma_addr + fb->offsets[i] + offsets[i]); + } /* Pointer Context Word 0/1/2: Written by the HVS */ for (i = 0; i < num_planes; i++) @@ -1298,7 +1387,11 @@ static int vc4_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - return vc4_plane_allocate_lbm(new_plane_state); + ret = vc4_plane_allocate_lbm(new_plane_state); + if (ret) + return ret; + + return 0; } static void vc4_plane_atomic_update(struct drm_plane *plane, @@ -1362,13 +1455,13 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) * scanout will start from this address as soon as the FIFO * needs to refill with pixels. */ - writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); /* Also update the CPU-side dlist copy, so that any later * atomic updates that don't do a new modeset on our plane * also use our updated address. */ - vc4_state->dlist[vc4_state->ptr0_offset] = addr; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr; drm_dev_exit(idx); } @@ -1423,8 +1516,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, sizeof(vc4_state->y_scaling)); vc4_state->is_unity = new_vc4_state->is_unity; vc4_state->is_yuv = new_vc4_state->is_yuv; - memcpy(vc4_state->offsets, new_vc4_state->offsets, - sizeof(vc4_state->offsets)); vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill; /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. 
*/ @@ -1432,8 +1523,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, new_vc4_state->dlist[vc4_state->pos0_offset]; vc4_state->dlist[vc4_state->pos2_offset] = new_vc4_state->dlist[vc4_state->pos2_offset]; - vc4_state->dlist[vc4_state->ptr0_offset] = - new_vc4_state->dlist[vc4_state->ptr0_offset]; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = + new_vc4_state->dlist[vc4_state->ptr0_offset[0]]; /* Note that we can't just call vc4_plane_write_dlist() * because that would smash the context data that the HVS is @@ -1443,8 +1534,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, &vc4_state->hw_dlist[vc4_state->pos0_offset]); writel(vc4_state->dlist[vc4_state->pos2_offset], &vc4_state->hw_dlist[vc4_state->pos2_offset]); - writel(vc4_state->dlist[vc4_state->ptr0_offset], - &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(vc4_state->dlist[vc4_state->ptr0_offset[0]], + &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); drm_dev_exit(idx); } @@ -1471,7 +1562,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || - old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset || + old_vc4_state->ptr0_offset[0] != new_vc4_state->ptr0_offset[0] || vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state)) return -EINVAL; @@ -1481,7 +1572,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, for (i = 0; i < new_vc4_state->dlist_count; i++) { if (i == new_vc4_state->pos0_offset || i == new_vc4_state->pos2_offset || - i == new_vc4_state->ptr0_offset || + i == new_vc4_state->ptr0_offset[0] || (new_vc4_state->lbm_offset && i == new_vc4_state->lbm_offset)) continue; @@ -1632,7 +1723,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, }; for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { - if (!hvs_formats[i].hvs5_only || vc4->is_vc5) { + if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) { formats[num_formats] = hvs_formats[i].drm; num_formats++; } @@ -1647,7 +1738,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, return ERR_CAST(vc4_plane); plane = &vc4_plane->base; - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) drm_plane_helper_add(plane, &vc5_plane_helper_funcs); else drm_plane_helper_add(plane, &vc4_plane_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 8ac9515554f8..c55dec383929 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -777,6 +777,7 @@ enum { # define VC4_HD_VID_CTL_CLRSYNC BIT(24) # define VC4_HD_VID_CTL_CLRRGB BIT(23) # define VC4_HD_VID_CTL_BLANKPIX BIT(18) +# define VC4_HD_VID_CTL_BLANK_INSERT_EN BIT(16) # define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5) # define VC4_HD_CSC_CTL_ORDER_SHIFT 5 diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 1bda5010f15a..14079853338e 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) bool has_bin = args->bin_cl_size != 0; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->min_x_tile > args->max_x_tile || diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index bf5c4e36c94e..2423826c89eb 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ 
b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) int vc4_v3d_pm_get(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->power_lock); @@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4) void vc4_v3d_pm_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->power_lock); @@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) uint64_t seqno = 0; struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; try_again: @@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used) { int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->bin_bo_lock); @@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *ref) void vc4_v3d_bin_bo_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->bin_bo_lock); diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 0c17284bf6f5..5bf134968ade 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) struct drm_gem_dma_object *obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; if (hindex >= exec->bo_count) { @@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo, uint32_t utile_w = utile_width(cpp); uint32_t utile_h = utile_height(cpp); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return false; /* The shaded vertex format stores signed 12.4 fixed point @@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *dev, uint32_t dst_offset = 0; uint32_t src_offset = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; while (src_offset < len) { @@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_device *dev, uint32_t i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; for (i = 0; i < exec->shader_state_count; i++) { diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 9745f8810eca..2d74e786914c 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_object *shader_obj) struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; memset(&validation_state, 0, sizeof(validation_state)); diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index ea06ff2aa4b4..fc884fb57b7e 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO_MENU && MMU select VIRTIO + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select VIRTIO_DMA_SHARED_BUFFER diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index e5a2665e50ea..ffca6e2e1c9a 100644 --- 
a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -26,14 +26,15 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/aperture.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/wait.h> #include <drm/drm.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> @@ -58,7 +59,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev) vga ? "virtio-vga" : "virtio-gpu-pci", pname); if (vga) { - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) return ret; } @@ -103,7 +104,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev) if (ret) goto err_deinit; - drm_fbdev_shmem_setup(vdev->priv, 32); + drm_client_setup(vdev->priv, NULL); + return 0; err_deinit: @@ -184,6 +186,8 @@ static const struct drm_driver driver = { .dumb_create = virtio_gpu_mode_dumb_create, .dumb_map_offset = virtio_gpu_mode_dumb_mmap, + DRM_FBDEV_SHMEM_DRIVER_OPS, + #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig index b9ecdebecb0b..9def079f685b 100644 --- a/drivers/gpu/drm/vkms/Kconfig +++ b/drivers/gpu/drm/vkms/Kconfig @@ -3,6 +3,7 @@ config DRM_VKMS tristate "Virtual KMS (EXPERIMENTAL)" depends on DRM && MMU + select DRM_CLIENT_SELECTION select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select CRC32 diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index e7441b227b3c..3f0977d746be 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -187,8 +187,15 @@ static void blend(struct vkms_writeback_job *wb, const struct pixel_argb_u16 background_color = { .a = 0xffff }; - size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay; + size_t crtc_y_limit = crtc_state->base.mode.vdisplay; + /* + * The planes are composed line-by-line to avoid heavy memory usage. It is a necessary + * complexity to avoid poor blending performance. + * + * The function vkms_compose_row() is used to read a line, pixel-by-pixel, into the staging + * buffer. 
+ */ for (size_t y = 0; y < crtc_y_limit; y++) { fill_background(&background_color, output_buffer); @@ -263,7 +270,7 @@ static int compose_active_planes(struct vkms_writeback_job *active_wb, if (WARN_ON(check_format_funcs(crtc_state, active_wb))) return -EINVAL; - line_width = crtc_state->base.crtc->mode.hdisplay; + line_width = crtc_state->base.mode.hdisplay; stage_buffer.n_pixels = line_width; output_buffer.n_pixels = line_width; diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 40b4d084e3ce..bbf080d32d2c 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -64,8 +64,6 @@ static int vkms_enable_vblank(struct drm_crtc *crtc) struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct vkms_output *out = drm_crtc_to_vkms_output(crtc); - drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); out->vblank_hrtimer.function = &vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); @@ -232,6 +230,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) + __acquires(&vkms_output->lock) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -243,6 +242,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) + __releases(&vkms_output->lock) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -287,7 +287,12 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs); - drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE); + ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE); + if (ret) { + DRM_ERROR("Failed to set gamma size\n"); + return ret; + } + drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE); spin_lock_init(&vkms_out->lock); diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 0c1a713b7b7b..2d1e95cb66e5 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -16,6 +16,7 @@ #include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_file.h> @@ -112,6 +113,7 @@ static const struct drm_driver vkms_driver = { .release = vkms_release, .fops = &vkms_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -225,7 +227,7 @@ static int vkms_create(struct vkms_config *config) if (ret) goto out_devres; - drm_fbdev_shmem_setup(&vkms_device->drm, 0); + drm_client_setup(&vkms_device->drm, NULL); return 0; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5e46ea5b96dc..672fe191e239 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -25,6 +25,17 @@ #define VKMS_LUT_SIZE 256 +/** + * struct vkms_frame_info - Structure to store the state of a frame + * + * @fb: backing drm framebuffer + * @src: source rectangle of this frame in the source framebuffer, stored in 16.16 fixed-point form + * @dst: destination rectangle in the crtc buffer, stored in whole pixel units + * @map: see @drm_shadow_plane_state.data + * @rotation: rotation applied to the source. + * + * @src and @dst should have the same size modulo the rotation. 
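 * [Editor's note, not part of this patch: given the 16.16 convention above, the
 *  whole-pixel size of @src is recovered with the <drm/drm_rect.h> helpers, e.g.
 *  drm_rect_width(&frame_info->src) >> 16, while @dst is already in whole
 *  pixels.]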
+ */
 struct vkms_frame_info {
 	struct drm_framebuffer *fb;
 	struct drm_rect src, dst;
@@ -52,9 +63,11 @@ struct vkms_writeback_job {
 };
 
 /**
- * vkms_plane_state - Driver specific plane state
+ * struct vkms_plane_state - Driver specific plane state
  * @base: base plane state
  * @frame_info: data required for composing computation
+ * @pixel_read: function to read a pixel in this plane. The creator of a struct vkms_plane_state
+ *		must ensure that this pointer is valid
  */
 struct vkms_plane_state {
 	struct drm_shadow_plane_state base;
@@ -73,29 +86,56 @@ struct vkms_color_lut {
 };
 
 /**
- * vkms_crtc_state - Driver specific CRTC state
+ * struct vkms_crtc_state - Driver specific CRTC state
+ *
  * @base: base CRTC state
  * @composer_work: work struct to compose and add CRC entries
- * @n_frame_start: start frame number for computed CRC
- * @n_frame_end: end frame number for computed CRC
+ *
+ * @num_active_planes: Number of active planes
+ * @active_planes: List containing all the active planes (counted by
+ *		   @num_active_planes). They should be stored in z-order.
+ * @active_writeback: Current active writeback job
+ * @gamma_lut: Lookup table for gamma used in this CRTC
+ * @crc_pending: Protected by @vkms_output.composer_lock, true when the frame CRC is not computed
+ *		 yet. Used by vblank to detect if the composer is too slow.
+ * @wb_pending: Protected by @vkms_output.composer_lock, true when a writeback frame is requested.
+ * @frame_start: Protected by @vkms_output.composer_lock, saves the frame number before the start
+ *		 of the composition process.
+ * @frame_end: Protected by @vkms_output.composer_lock, saves the last requested frame number.
+ *	       This is used to generate enough CRC entries when the composition worker is too slow.
  */
 struct vkms_crtc_state {
 	struct drm_crtc_state base;
 	struct work_struct composer_work;
 
 	int num_active_planes;
-	/* stack of active planes for crc computation, should be in z order */
 	struct vkms_plane_state **active_planes;
 	struct vkms_writeback_job *active_writeback;
 	struct vkms_color_lut gamma_lut;
 
-	/* below four are protected by vkms_output.composer_lock */
 	bool crc_pending;
 	bool wb_pending;
 	u64 frame_start;
 	u64 frame_end;
 };
 
+/**
+ * struct vkms_output - Internal representation of all output components in VKMS
+ *
+ * @crtc: Base CRTC in DRM
+ * @encoder: DRM encoder used for this output
+ * @connector: DRM connector used for this output
+ * @wb_connector: DRM writeback connector used for this output
+ * @vblank_hrtimer: Timer used to trigger the vblank
+ * @period_ns: vblank period, in nanoseconds, used to configure @vblank_hrtimer and to compute
+ *	       vblank timestamps
+ * @composer_workq: Ordered workqueue for @composer_state.composer_work.
+ * @lock: Lock used to protect concurrent access to the composer + * @composer_enabled: Protected by @lock, true when the VKMS composer is active (crc needed or + * writeback) + * @composer_state: Protected by @lock, current state of this VKMS output + * @composer_lock: Lock used internally to protect @composer_state members + */ struct vkms_output { struct drm_crtc crtc; struct drm_encoder encoder; @@ -103,28 +143,38 @@ struct vkms_output { struct drm_writeback_connector wb_connector; struct hrtimer vblank_hrtimer; ktime_t period_ns; - /* ordered wq for composer_work */ struct workqueue_struct *composer_workq; - /* protects concurrent access to composer */ spinlock_t lock; - /* protected by @lock */ bool composer_enabled; struct vkms_crtc_state *composer_state; spinlock_t composer_lock; }; -struct vkms_device; - +/** + * struct vkms_config - General configuration for VKMS driver + * + * @writeback: If true, a writeback buffer can be attached to the CRTC + * @cursor: If true, a cursor plane is created in the VKMS device + * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device + * @dev: Used to store the current VKMS device. Only set when the device is instantiated. + */ struct vkms_config { bool writeback; bool cursor; bool overlay; - /* only set when instantiated */ struct vkms_device *dev; }; +/** + * struct vkms_device - Description of a VKMS device + * + * @drm: Base device in DRM + * @platform: Associated platform device + * @output: Configuration and sub-components of the VKMS device + * @config: Configuration used in this VKMS device + */ struct vkms_device { struct drm_device drm; struct platform_device *platform; @@ -132,6 +182,10 @@ struct vkms_device { const struct vkms_config *config; }; +/* + * The following helpers are used to convert a pointer to a struct member into a pointer to + * its parent structure. + */ + #define drm_crtc_to_vkms_output(target) \ container_of(target, struct vkms_output, crtc) @@ -144,12 +198,33 @@ struct vkms_device { #define to_vkms_plane_state(target)\ container_of(target, struct vkms_plane_state, base.base) -/* CRTC */ +/** + * vkms_crtc_init() - Initialize a CRTC for VKMS + * @dev: DRM device associated with the VKMS buffer + * @crtc: uninitialized CRTC device + * @primary: primary plane to attach to the CRTC + * @cursor: plane to attach to the CRTC + */ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor); +/** + * vkms_output_init() - Initialize all sub-components needed for a VKMS device. + * + * @vkmsdev: VKMS device to initialize + * @index: CRTC which can be attached to the planes. The caller must ensure that + * @index is positive and less than or equal to 31. + */ int vkms_output_init(struct vkms_device *vkmsdev, int index); +/** + * vkms_plane_init() - Initialize a plane + * + * @vkmsdev: VKMS device containing the plane + * @type: type of plane to initialize + * @index: CRTC which can be attached to the plane. The caller must ensure that + * @index is positive and less than or equal to 31.
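+ *
+ * Typical use, mirroring vkms_output_init() in vkms_output.c (sketch):
+ *
+ *	plane = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
+ *	if (IS_ERR(plane))
+ *		return PTR_ERR(plane);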
+ */ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, enum drm_plane_type type, int index); diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c index 040b7f113a3b..e8a5cc235ebb 100644 --- a/drivers/gpu/drm/vkms/vkms_formats.c +++ b/drivers/gpu/drm/vkms/vkms_formats.c @@ -9,24 +9,40 @@ #include "vkms_formats.h" +/** + * pixel_offset() - Get the offset of the pixel at coordinates x/y in the first plane + * + * @frame_info: Buffer metadata + * @x: The x coordinate of the wanted pixel in the buffer + * @y: The y coordinate of the wanted pixel in the buffer + * + * The caller must ensure that the framebuffer associated with this request uses a pixel format + * where block_h == block_w == 1. + * If this requirement is not fulfilled, the resulting offset can point to another pixel or + * outside of the buffer. + */ static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y) { return frame_info->offset + (y * frame_info->pitch) + (x * frame_info->cpp); } -/* - * packed_pixels_addr - Get the pointer to pixel of a given pair of coordinates +/** + * packed_pixels_addr() - Get the pointer to the block containing the pixel at the given + * coordinates * * @frame_info: Buffer metadata - * @x: The x(width) coordinate of the 2D buffer - * @y: The y(Heigth) coordinate of the 2D buffer + * @x: The x (width) coordinate inside the plane + * @y: The y (height) coordinate inside the plane * * Takes the information stored in the frame_info, a pair of coordinates, and * returns the address of the first color channel. * This function assumes the channels are packed together, i.e. a color channel * comes immediately after another in the memory. And therefore, this function * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21). + * + * The caller must ensure that the framebuffer associated with this request uses a pixel format + * where block_h == block_w == 1, otherwise the returned pointer can be outside the buffer. */ static void *packed_pixels_addr(const struct vkms_frame_info *frame_info, int x, int y) @@ -51,6 +67,13 @@ static int get_x_position(const struct vkms_frame_info *frame_info, int limit, i return x; } +/* + * The following functions take pixel data from the buffer and convert them to the format + * ARGB16161616 in @out_pixel. + * + * They are used in the vkms_compose_row() function to handle multiple formats. + */ + static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel) { /* @@ -143,12 +166,11 @@ void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state } /* - * The following functions take an line of argb_u16 pixels from the - * src_buffer, convert them to a specific format, and store them in the - * destination. + * The following functions take one &struct pixel_argb_u16 and convert it to a specific format. + * The result is stored in @dst_pixels. * - * They are used in the `compose_active_planes` to convert and store a line - * from the src_buffer to the writeback buffer. + * They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to + * the writeback buffer. */ static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel) { @@ -214,6 +236,14 @@ static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel) *pixels = cpu_to_le16(r << 11 | g << 5 | b); } +/** + * vkms_writeback_row() - Generic loop for all supported writeback formats.
It is executed just + * after the blending to write a line in the writeback buffer. + * + * @wb: Job where to insert the final image + * @src_buffer: Line to write + * @y: Row to write in the writeback buffer + */ void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y) { @@ -227,6 +257,13 @@ void vkms_writeback_row(struct vkms_writeback_job *wb, wb->pixel_write(dst_pixels, &in_pixels[x]); } +/** + * get_pixel_conversion_function() - Retrieve the correct read_pixel function for a specific + * format. The returned pointer is NULL for unsupported pixel formats. The caller must ensure that + * the pointer is valid before using it in a vkms_plane_state. + * + * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h]) + */ void *get_pixel_conversion_function(u32 format) { switch (format) { @@ -245,6 +282,13 @@ void *get_pixel_conversion_function(u32 format) } } +/** + * get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format. + * The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the + * pointer is valid before using it in a vkms_writeback_job. + * + * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h]) + */ void *get_pixel_write_function(u32 format) { switch (format) { diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 5ce70dd946aa..25a99fde126c 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -21,6 +21,7 @@ static int vkms_conn_get_modes(struct drm_connector *connector) { int count; + /* Use the default modes list from DRM */ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); @@ -58,6 +59,12 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) int writeback; unsigned int n; + /* + * Initialize the planes used by this output. One primary plane is required to perform + * the composition. + * + * The overlay and cursor planes are not mandatory, but can be used to perform complex + * composition. + */ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index); if (IS_ERR(primary)) return PTR_ERR(primary); @@ -76,6 +83,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) return PTR_ERR(cursor); } + /* [1]: Allocation of a CRTC, its index will be BIT(0) = 1 */ ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base); if (ret) return ret; @@ -84,7 +92,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { DRM_ERROR("Failed to init connector\n"); - goto err_connector; + return ret; } drm_connector_helper_add(connector, &vkms_conn_helper_funcs); @@ -95,7 +103,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) DRM_ERROR("Failed to init encoder\n"); goto err_encoder; } - encoder->possible_crtcs = 1; + /* + * This is a hardcoded value to select the CRTC for the encoder.
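+ * (A non-hardcoded sketch of the same assignment would be
+ * encoder->possible_crtcs = drm_crtc_mask(crtc), where drm_crtc_mask()
+ * computes this bit from the CRTC's registered index.)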
+ * BIT(0) here designates the first registered CRTC, the one allocated in [1]. + */ + encoder->possible_crtcs = BIT(0); ret = drm_connector_attach_encoder(connector, encoder); if (ret) { @@ -119,8 +131,5 @@ err_attach: err_encoder: drm_connector_cleanup(connector); -err_connector: - drm_crtc_cleanup(crtc); - return ret; } diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index bc724cbd5e3a..999d5c01ea81 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -131,8 +131,8 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn, struct drm_connector_state *conn_state = wb_conn->base.state; struct vkms_crtc_state *crtc_state = output->composer_state; struct drm_framebuffer *fb = connector_state->writeback_job->fb; - u16 crtc_height = crtc_state->base.crtc->mode.vdisplay; - u16 crtc_width = crtc_state->base.crtc->mode.hdisplay; + u16 crtc_height = crtc_state->base.mode.vdisplay; + u16 crtc_width = crtc_state->base.mode.hdisplay; struct vkms_writeback_job *active_wb; struct vkms_frame_info *wb_frame_info; u32 wb_format = fb->format->format; diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 6f1ac940cbae..6c3c2922ae8b 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -3,6 +3,7 @@ config DRM_VMWGFX tristate "DRM driver for VMware Virtual GPU" depends on DRM && PCI && MMU depends on (X86 && HYPERVISOR_GUEST) || ARM64 + select DRM_CLIENT_SELECTION select DRM_TTM select DRM_TTM_HELPER select MAPPING_DIRTY_HELPERS diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 3353e97687d1..a17e62867f3b 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -471,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) */ static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) { - return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; + return file_ref_get(&dmabuf->file->f_ref); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 2825dd3149ed..2c46897876dd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -35,7 +35,7 @@ #include "vmwgfx_vkms.h" #include "ttm_object.h" -#include <drm/drm_aperture.h> +#include <drm/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_ttm.h> #include <drm/drm_gem_ttm_helper.h> @@ -49,6 +49,8 @@ #ifdef CONFIG_X86 #include <asm/hypervisor.h> #endif + +#include <linux/aperture.h> #include <linux/cc_platform.h> #include <linux/dma-mapping.h> #include <linux/module.h> @@ -859,8 +861,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) bool refuse_dma = false; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - dev_priv->drm.dev_private = dev_priv; - vmw_sw_context_init(dev_priv); mutex_init(&dev_priv->cmdbuf_mutex); @@ -1629,6 +1629,8 @@ static const struct drm_driver driver = { .prime_handle_to_fd = vmw_prime_handle_to_fd, .gem_prime_import_sg_table = vmw_prime_import_sg_table, + DRM_FBDEV_TTM_DRIVER_OPS, + .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, @@ -1653,7 +1655,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct vmw_private *vmw; int ret; - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + ret = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (ret) goto 
out_error; @@ -1680,7 +1682,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vmw_fifo_resource_inc(vmw); vmw_svga_enable(vmw); - drm_fbdev_ttm_setup(&vmw->drm, 0); + drm_client_setup(&vmw->drm, NULL); vmw_debugfs_gem_init(vmw); vmw_debugfs_resource_managers_init(vmw); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4e2807f5f94c..b21831ef214a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -639,7 +639,7 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) static inline struct vmw_private *vmw_priv(struct drm_device *dev) { - return (struct vmw_private *)dev->dev_private; + return container_of(dev, struct vmw_private, drm); } static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 10d596cb4b40..8db38927729b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -280,7 +280,7 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, struct vmw_plane_state *vps) { - struct vmw_private *dev_priv = vcp->base.dev->dev_private; + struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); u32 i; u32 cursor_max_dim, mob_max_size; @@ -519,7 +519,7 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane) struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); u32 i; - vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); + vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7bbe46a98ff1..b51a2bde73e2 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -8,12 +8,14 @@ config DRM_XE select SHMEM select TMPFS select DRM_BUDDY + select DRM_CLIENT_SELECTION select DRM_EXEC select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n select DRM_PANEL select DRM_SUBALLOC_HELPER select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER @@ -49,7 +51,7 @@ config DRM_XE config DRM_XE_DISPLAY bool "Enable display support" - depends on DRM_XE && DRM_XE=m + depends on DRM_XE && DRM_XE=m && HAS_IOPORT select FB_IOMEM_HELPERS select I2C select I2C_ALGOBIT diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug index bc177368af6c..2de0de41b8dd 100644 --- a/drivers/gpu/drm/xe/Kconfig.debug +++ b/drivers/gpu/drm/xe/Kconfig.debug @@ -40,9 +40,21 @@ config DRM_XE_DEBUG_VM If in doubt, say "N". +config DRM_XE_DEBUG_MEMIRQ + bool "Enable extra memirq debugging" + default n + help + Choose this option to enable additional debugging info for + memory based interrupts. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_XE_DEBUG_SRIOV bool "Enable extra SR-IOV debugging" default n + select DRM_XE_DEBUG_MEMIRQ help Enable extra SR-IOV debugging info. 
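The vmw_priv() change in the hunk above swaps the old dev_private back-pointer for the container_of() idiom over an embedded struct drm_device. A minimal sketch of the pattern, with hypothetical driver and member names:

	#include <linux/container_of.h>
	#include <drm/drm_device.h>

	struct my_private {
		struct drm_device drm;	/* embedded, not allocated separately */
		void __iomem *mmio;
	};

	/* No dev->dev_private needed: recover the wrapper from the member. */
	static inline struct my_private *to_my_private(struct drm_device *dev)
	{
		return container_of(dev, struct my_private, drm);
	}

The compile-time offset arithmetic replaces a pointer load and removes the untyped void * field, which is why the same series can delete the dev_private assignment from vmw_driver_load().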
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index edfd812e0f41..bc7a04ce69fd 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -56,6 +56,7 @@ xe-y += xe_bb.o \ xe_gt_topology.o \ xe_guc.o \ xe_guc_ads.o \ + xe_guc_capture.o \ xe_guc_ct.o \ xe_guc_db_mgr.o \ xe_guc_hwconfig.o \ @@ -129,6 +130,7 @@ xe-$(CONFIG_PCI_IOV) += \ xe_gt_sriov_pf.o \ xe_gt_sriov_pf_config.o \ xe_gt_sriov_pf_control.o \ + xe_gt_sriov_pf_migration.o \ xe_gt_sriov_pf_monitor.o \ xe_gt_sriov_pf_policy.o \ xe_gt_sriov_pf_service.o \ @@ -148,7 +150,6 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \ -I$(src)/display/ext \ -I$(src)/compat-i915-headers \ -I$(srctree)/drivers/gpu/drm/i915/display/ \ - -Ddrm_i915_gem_object=xe_bo \ -Ddrm_i915_private=xe_device # Rule to build SOC code shared with i915 @@ -165,6 +166,7 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/ext/i915_irq.o \ display/ext/i915_utils.o \ + display/intel_bo.o \ display/intel_fb_bo.o \ display/intel_fbdev_fb.o \ display/xe_display.o \ @@ -180,7 +182,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ # SOC code shared with i915 xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-soc/intel_dram.o \ - i915-soc/intel_pch.o + i915-soc/intel_pch.o \ + i915-soc/intel_rom.o # Display code shared with i915 xe-$(CONFIG_DRM_XE_DISPLAY) += \ @@ -220,6 +223,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_dp_hdcp.o \ i915-display/intel_dp_link_training.o \ i915-display/intel_dp_mst.o \ + i915-display/intel_dp_test.o \ i915-display/intel_dpll.o \ i915-display/intel_dpll_mgr.o \ i915-display/intel_dpt_common.o \ @@ -248,6 +252,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_modeset_setup.o \ i915-display/intel_modeset_verify.o \ i915-display/intel_panel.o \ + i915-display/intel_pfit.o \ i915-display/intel_pmdemand.o \ i915-display/intel_pps.o \ i915-display/intel_psr.o \ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index 43ad4652c2b2..b54fe40fc5a9 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -176,6 +176,14 @@ enum xe_guc_sleep_state_status { #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) #define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) +enum xe_guc_state_capture_event_status { + XE_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0, + XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1, +}; + +#define XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF +#define XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN 1 + #define XE_GUC_TLB_INVAL_TYPE_SHIFT 0 #define XE_GUC_TLB_INVAL_MODE_SHIFT 8 /* Flush PPC or SMRO caches along with TLB invalidation request */ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h index 181180f5945c..b6a1852749dd 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h @@ -557,4 +557,65 @@ #define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn #define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96 GUC_HXG_REQUEST_MSG_n_DATAn +/** + * DOC: PF2GUC_SAVE_RESTORE_VF + * + * This message is used by the PF to migrate VF info state maintained by the GuC. + * + * This message must be sent as `CTB HXG Message`_. 
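+ *
+ * A host-side packing sketch, assuming the generic FIELD_PREP(),
+ * lower_32_bits() and upper_32_bits() helpers plus the masks defined after
+ * the tables below; vfid, addr and ndwords are illustrative locals:
+ *
+ *	u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
+ *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ *			   GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) |
+ *		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE,
+ *			   GUC_PF_OPCODE_VF_SAVE),
+ *		vfid,
+ *		lower_32_bits(addr),
+ *		upper_32_bits(addr),
+ *		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords),
+ *	};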
+ * + * Available since GuC version 70.25.0 + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = **OPCODE** - operation to take: | + * | | | | + * | | | - _`GUC_PF_OPCODE_VF_SAVE` = 0 | + * | | | - _`GUC_PF_OPCODE_VF_RESTORE` = 1 | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_SAVE_RESTORE_VF` = 0x550B | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **ADDR_LO** - lower 32-bits of GGTT offset to the buffer | + * | | | where the VF info will be saved to or restored from. | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **ADDR_HI** - upper 32-bits of GGTT offset to the buffer | + * | | | where the VF info will be saved to or restored from. | + * +---+-------+--------------------------------------------------------------+ + * | 4 | 27:0 | **SIZE** - size of the buffer (in dwords) | + * | +-------+--------------------------------------------------------------+ + * | | 31:28 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = **USED** - size of used buffer space (in dwords) | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_SAVE_RESTORE_VF 0x550Bu + +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 4u) +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC_PF_OPCODE_VF_SAVE 0u +#define GUC_PF_OPCODE_VF_RESTORE 1u +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE (0xfffffffu << 0) +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_MBZ (0xfu << 28) + +#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN +#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED GUC_HXG_RESPONSE_MSG_0_DATA0 + #endif diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h new file mode 100644 index 000000000000..e7898edc6236 --- /dev/null +++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _ABI_GUC_CAPTURE_ABI_H +#define _ABI_GUC_CAPTURE_ABI_H + +#include <linux/types.h> + +/* Capture 
List Index */ +enum guc_capture_list_index_type { + GUC_CAPTURE_LIST_INDEX_PF = 0, + GUC_CAPTURE_LIST_INDEX_VF = 1, +}; + +#define GUC_CAPTURE_LIST_INDEX_MAX (GUC_CAPTURE_LIST_INDEX_VF + 1) + +/* Register-types of GuC capture register lists */ +enum guc_state_capture_type { + GUC_STATE_CAPTURE_TYPE_GLOBAL = 0, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE +}; + +#define GUC_STATE_CAPTURE_TYPE_MAX (GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1) + +/* Class indices for capture_class and capture_instance arrays */ +enum guc_capture_list_class_type { + GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0, + GUC_CAPTURE_LIST_CLASS_VIDEO = 1, + GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2, + GUC_CAPTURE_LIST_CLASS_BLITTER = 3, + GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4, +}; + +#define GUC_CAPTURE_LIST_CLASS_MAX (GUC_CAPTURE_LIST_CLASS_GSC_OTHER + 1) + +/** + * struct guc_mmio_reg - GuC MMIO reg state struct + * + * GuC MMIO reg state struct + */ +struct guc_mmio_reg { + /** @offset: MMIO Offset - filled in by Host */ + u32 offset; + /** @value: MMIO Value - Used by Firmware to store value */ + u32 value; + /** @flags: Flags for accessing the MMIO */ + u32 flags; + /** @mask: Value of a mask to apply if mask with value is set */ + u32 mask; +#define GUC_REGSET_MASKED BIT(0) +#define GUC_REGSET_STEERING_NEEDED BIT(1) +#define GUC_REGSET_MASKED_WITH_VALUE BIT(2) +#define GUC_REGSET_RESTORE_ONLY BIT(3) +#define GUC_REGSET_STEERING_GROUP GENMASK(16, 12) +#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20) +} __packed; + +/** + * struct guc_mmio_reg_set - GuC register sets + * + * GuC register sets + */ +struct guc_mmio_reg_set { + /** @address: register address */ + u32 address; + /** @count: register count */ + u16 count; + /** @reserved: reserved */ + u16 reserved; +} __packed; + +/** + * struct guc_debug_capture_list_header - Debug capture list header. + * + * Debug capture list header. + */ +struct guc_debug_capture_list_header { + /** @info: contains number of MMIO descriptors in the capture list. */ + u32 info; +#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0) +} __packed; + +/** + * struct guc_debug_capture_list - Debug capture list + * + * As part of ADS registration, these header structures (followed by + * an array of 'struct guc_mmio_reg' entries) are used to register with + * GuC microkernel the list of registers we want it to dump out prior + * to an engine reset. + */ +struct guc_debug_capture_list { + /** @header: Debug capture list header. */ + struct guc_debug_capture_list_header header; + /** @regs: MMIO descriptors in the capture list. */ + struct guc_mmio_reg regs[]; +} __packed; + +/** + * struct guc_state_capture_header_t - State capture header. + * + * Prior to resetting engines that have hung or faulted, GuC microkernel + * reports the engine error-state (register values that were read) by + * logging them into the shared GuC log buffer using this hierarchy + * of structures. + */ +struct guc_state_capture_header_t { + /** + * @owner: VFID + * BR[ 7: 0] MBZ when SRIOV is disabled. When SRIOV is enabled + * VFID is an integer in range [0, 63] where 0 means the state capture + * corresponds to the PF and an integer N in range [1, 63] means + * the state capture is for VF N. 
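	 *
	 * A decoding sketch, using the generic FIELD_GET() helper and the
	 * GUC_STATE_CAPTURE_HEADER_VFID mask defined just below (hdr is an
	 * illustrative pointer to this header):
	 * vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr->owner);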
+ */ u32 owner; +#define GUC_STATE_CAPTURE_HEADER_VFID GENMASK(7, 0) + /** @info: Engine class/instance and capture type info */ + u32 info; +#define GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE GENMASK(3, 0) /* see guc_state_capture_type */ +#define GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS GENMASK(7, 4) /* see guc_capture_list_class_type */ +#define GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE GENMASK(11, 8) + /** + * @lrca: logical ring context address. + * if type-instance, LRCA (address) that hung, else set to ~0 + */ + u32 lrca; + /** + * @guc_id: context_index. + * if type-instance, context index of hung context, else set to ~0 + */ + u32 guc_id; + /** @num_mmio_entries: Number of captured MMIO entries. */ + u32 num_mmio_entries; +#define GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES GENMASK(9, 0) +} __packed; + +/** + * struct guc_state_capture_t - State capture. + * + * State capture + */ +struct guc_state_capture_t { + /** @header: State capture header. */ + struct guc_state_capture_header_t header; + /** @mmio_entries: Array of captured guc_mmio_reg entries. */ + struct guc_mmio_reg mmio_entries[]; +} __packed; + +/* State Capture Group Type */ +enum guc_state_capture_group_type { + GUC_STATE_CAPTURE_GROUP_TYPE_FULL = 0, + GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL +}; + +#define GUC_STATE_CAPTURE_GROUP_TYPE_MAX (GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL + 1) + +/** + * struct guc_state_capture_group_header_t - State capture group header + * + * State capture group header. + */ +struct guc_state_capture_group_header_t { + /** @owner: VFID */ + u32 owner; +#define GUC_STATE_CAPTURE_GROUP_HEADER_VFID GENMASK(7, 0) + /** @info: Number of captures in the group and the capture group type */ + u32 info; +#define GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES GENMASK(7, 0) +#define GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE GENMASK(15, 8) +} __packed; + +/** + * struct guc_state_capture_group_t - State capture group. + * + * This is the top-level structure where an error-capture dump starts. + */ +struct guc_state_capture_group_t { + /** @grp_header: State capture group header. 
*/ struct guc_state_capture_group_header_t grp_header; + /** @capture_entries: Array of state captures */ + struct guc_state_capture_t capture_entries[]; +} __packed; + +#endif diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h index 8f86a16dc577..f58198cf2cf6 100644 --- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h @@ -52,6 +52,7 @@ struct guc_ct_buffer_desc { #define GUC_CTB_STATUS_OVERFLOW (1 << 0) #define GUC_CTB_STATUS_UNDERFLOW (1 << 1) #define GUC_CTB_STATUS_MISMATCH (1 << 2) +#define GUC_CTB_STATUS_DISABLED (1 << 3) u32 reserved[13]; } __packed; static_assert(sizeof(struct guc_ct_buffer_desc) == 64); diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index 6b30743a2f6c..37606cf8cc5e 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -352,6 +352,7 @@ enum xe_guc_klv_ids { GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007, GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008, GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009, + GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a, }; #endif diff --git a/drivers/gpu/drm/xe/abi/guc_log_abi.h b/drivers/gpu/drm/xe/abi/guc_log_abi.h new file mode 100644 index 000000000000..554630b7ccd9 --- /dev/null +++ b/drivers/gpu/drm/xe/abi/guc_log_abi.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _ABI_GUC_LOG_ABI_H +#define _ABI_GUC_LOG_ABI_H + +#include <linux/types.h> + +/* GuC logging buffer types */ +enum guc_log_buffer_type { + GUC_LOG_BUFFER_CRASH_DUMP, + GUC_LOG_BUFFER_DEBUG, + GUC_LOG_BUFFER_CAPTURE, +}; + +#define GUC_LOG_BUFFER_TYPE_MAX 3 + +/** + * struct guc_log_buffer_state - GuC log buffer state + * + * The state structure below is used to coordinate retrieval of GuC firmware + * logs. Separate state is maintained for each log buffer type. + * read_ptr points to the location where Xe last read in the log buffer and + * is read only for GuC firmware. write_ptr is incremented by GuC with number + * of bytes written for each log entry and is read only for Xe. + * When any type of log buffer becomes half full, GuC sends a flush interrupt. + * GuC firmware expects that while it is writing to 2nd half of the buffer, + * first half would get consumed by Host and then get a flush completed + * acknowledgment from Host, so that it does not end up doing any overwrite + * causing loss of logs. So when buffer gets half filled & Xe has requested + * the interrupt, GuC will set flush_to_file field, set the sampled_write_ptr + * to the value of write_ptr and raise the interrupt. + * On receiving the interrupt Xe should read the buffer, clear flush_to_file + * field and also update read_ptr with the value of sampled_write_ptr, before + * sending an acknowledgment to GuC. marker & version fields are for internal + * usage of GuC and opaque to Xe. buffer_full_cnt field is incremented every + * time GuC detects the log buffer overflow. 
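+ *
+ * A consumer-side sketch of that handshake, where consume_log() stands in
+ * for the actual copy-out (illustrative only):
+ *
+ *	end = state->sampled_write_ptr;
+ *	consume_log(log_data + state->read_ptr, end);
+ *	state->read_ptr = end;
+ *	state->flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
+ *
+ * followed by the flush acknowledgment back to GuC.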
+ */ +struct guc_log_buffer_state { + /** @marker: buffer state start marker */ + u32 marker[2]; + /** @read_ptr: the last byte offset that was read by KMD previously */ + u32 read_ptr; + /** + * @write_ptr: the next byte offset location that will be written by + * GuC + */ + u32 write_ptr; + /** @size: Log buffer size */ + u32 size; + /** + * @sampled_write_ptr: Log buffer write pointer + * This is written by GuC to the byte offset of the next free entry in + * the buffer on log buffer half full or state capture notification + */ + u32 sampled_write_ptr; + /** + * @wrap_offset: wraparound offset + * This is the byte offset of location 1 byte after last valid GuC log + * event entry written by GuC firmware before there was a wraparound. + * This field is updated by GuC firmware and should be used by Host + * when copying buffer contents to file. + */ + u32 wrap_offset; + /** @flags: Flush to file flag and buffer full count */ + u32 flags; +#define GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE GENMASK(0, 0) +#define GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT GENMASK(4, 1) + /** @version: The GuC log entry format version */ + u32 version; +} __packed; + +#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h deleted file mode 100644 index 710cecca972d..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h +++ /dev/null @@ -1 +0,0 @@ -/* Empty */ diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h deleted file mode 100644 index 650ea2803a97..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _I915_GEM_MMAN_H_ -#define _I915_GEM_MMAN_H_ - -#include "xe_bo_types.h" -#include <drm/drm_prime.h> - -static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma) -{ - return drm_gem_prime_mmap(&bo->ttm.base, vma); -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h deleted file mode 100644 index 777c20ceabab..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _I915_GEM_OBJECT_H_ -#define _I915_GEM_OBJECT_H_ - -#include <linux/types.h> - -#include "xe_bo.h" - -#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */ - -static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) -{ - /* Should never be called */ - WARN_ON(1); - return n; -} - -static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo) -{ - /* legacy tiling is unused */ - return false; -} - -static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo) -{ - /* legacy tiling is unused */ - return false; -} - -static inline int i915_gem_object_read_from_page(struct xe_bo *bo, - u32 ofs, u64 *ptr, u32 size) -{ - struct ttm_bo_kmap_obj map; - void *src; - bool is_iomem; - int ret; - - ret = xe_bo_lock(bo, true); - if (ret) - return ret; - - ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map); - if (ret) - goto out_unlock; - - ofs &= ~PAGE_MASK; - src = ttm_kmap_obj_virtual(&map, &is_iomem); - src += ofs; - if (is_iomem) - memcpy_fromio(ptr, (void __iomem *)src, size); - else - memcpy(ptr, src, 
size); - - ttm_bo_kunmap(&map); -out_unlock: - xe_bo_unlock(bo); - return ret; -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h deleted file mode 100644 index 2a3f12d2978c..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _I915_GEM_OBJECT_FRONTBUFFER_H_ -#define _I915_GEM_OBJECT_FRONTBUFFER_H_ - -#define i915_gem_object_get_frontbuffer(obj) NULL -#define i915_gem_object_set_frontbuffer(obj, front) (front) - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h deleted file mode 100644 index 7d6bb1abab73..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* Copyright © 2024 Intel Corporation */ - -#ifndef __I915_GEM_OBJECT_TYPES_H__ -#define __I915_GEM_OBJECT_TYPES_H__ - -#include "xe_bo.h" - -#define to_intel_bo(x) gem_to_xe_bo((x)) - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index cb6c7598824b..9c4cf050059a 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, - ttm_bo_type_kernel, flags); + ttm_bo_type_kernel, flags, 0); if (IS_ERR(bo)) { err = PTR_ERR(bo); bo = NULL; diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h deleted file mode 100644 index b4c47617b64b..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef __I915_DEBUGFS_H__ -#define __I915_DEBUGFS_H__ - -struct drm_i915_gem_object; -struct seq_file; - -static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {} - -#endif /* __I915_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index f27a2c75b56d..84b0991b35b3 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -14,6 +14,7 @@ #include "i915_utils.h" #include "intel_runtime_pm.h" +#include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" static inline struct drm_i915_private *to_i915(const struct drm_device *dev) @@ -66,19 +67,14 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) #define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE) #define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE) #define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE) +#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE) #define IS_HASWELL_ULT(dev_priv) (dev_priv && 0) #define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0) #define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0) -#define IP_VER(ver, rel) ((ver) << 8 | (rel)) - #define 
IS_MOBILE(xe) (xe && 0) -#define IS_LP(xe) ((xe) && 0) -#define IS_GEN9_LP(xe) ((xe) && 0) -#define IS_GEN9_BC(xe) ((xe) && 0) - #define IS_TIGERLAKE_UY(xe) (xe && 0) #define IS_COMETLAKE_ULX(xe) (xe && 0) #define IS_COFFEELAKE_ULX(xe) (xe && 0) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h deleted file mode 100644 index 98e9dd78f670..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef _I915_GPU_ERROR_H_ -#define _I915_GPU_ERROR_H_ - -struct drm_i915_error_state_buf; - -__printf(2, 3) -static inline void -i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) -{ -} - -#endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h index 8c7b315aa8ac..274042bff1be 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h @@ -20,18 +20,26 @@ static inline void enable_rpm_wakeref_asserts(void *rpm) { } +static inline bool +intel_runtime_pm_suspended(struct xe_runtime_pm *pm) +{ + struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); + + return pm_runtime_suspended(xe->drm.dev); +} + static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - return xe_pm_runtime_resume_and_get(xe); + return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL; } static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) { struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - return xe_pm_runtime_get_if_in_use(xe); + return xe_pm_runtime_get_if_in_use(xe) ? 
INTEL_WAKEREF_DEF : NULL; } static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm) @@ -39,7 +47,8 @@ static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); xe_pm_runtime_get_noresume(xe); - return true; + + return INTEL_WAKEREF_DEF; } static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm) @@ -62,6 +71,6 @@ static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_ #define with_intel_runtime_pm(rpm, wf) \ for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = 0) + intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h index eb5b5f0e4bd9..0382beb4035b 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h @@ -10,11 +10,11 @@ #include "xe_device_types.h" #include "xe_mmio.h" -static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore) +static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore) { struct xe_device *xe = container_of(uncore, struct xe_device, uncore); - return xe_root_mmio_gt(xe); + return xe_root_tile_mmio(xe); } static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore) @@ -29,7 +29,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline u8 intel_uncore_read8(struct intel_uncore *uncore, @@ -37,7 +37,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg); } static inline u16 intel_uncore_read16(struct intel_uncore *uncore, @@ -45,7 +45,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg); } static inline u64 @@ -57,11 +57,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore, u32 upper, lower, old_upper; int loop = 0; - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg); do { old_upper = upper; - lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg); - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); + lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg); + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg); } while (upper != old_upper && loop++ < 2); return (u64)upper << 32 | lower; @@ -72,7 +72,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write(struct intel_uncore *uncore, @@ -80,7 +80,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - 
xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, @@ -88,7 +88,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set); + return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set); } static inline int intel_wait_for_register(struct intel_uncore *uncore, @@ -97,7 +97,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, timeout * USEC_PER_MSEC, NULL, false); } @@ -107,7 +107,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, timeout * USEC_PER_MSEC, NULL, false); } @@ -118,7 +118,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value, fast_timeout_us + 1000 * slow_timeout_ms, out_value, false); } @@ -128,7 +128,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write_fw(struct intel_uncore *uncore, @@ -136,7 +136,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore, @@ -144,7 +144,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg); } static inline void intel_uncore_write_notrace(struct intel_uncore *uncore, @@ -152,33 +152,9 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore, { struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val); } -static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore) -{ - struct xe_device *xe = container_of(uncore, struct xe_device, uncore); - - return xe_device_get_root_tile(xe)->mmio.regs; -} - -/* - * The raw_reg_{read,write} macros are intended as a micro-optimization for - * interrupt handlers so that the pointer indirection on uncore->regs can - * be computed once (and presumably cached in a register) instead of generating - * extra load instructions for each MMIO access. 
- * - * Given that these macros are only intended for non-GSI interrupt registers - * (and the goal is to avoid extra instructions generated by the compiler), - * these macros do not account for uncore->gsi_offset. Any caller that needs - * to use these macros on a GSI register is responsible for adding the - * appropriate GSI offset to the 'base' parameter. - */ -#define raw_reg_read(base, reg) \ - readl(base + i915_mmio_reg_offset(reg)) -#define raw_reg_write(base, reg, value) \ - writel(value, base + i915_mmio_reg_offset(reg)) - #define intel_uncore_forcewake_get(x, y) do { } while (0) #define intel_uncore_forcewake_put(x, y) do { } while (0) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h index ecb1c0707706..2a32faea9db5 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h @@ -5,4 +5,6 @@ #include <linux/types.h> -typedef unsigned long intel_wakeref_t; +typedef struct ref_tracker *intel_wakeref_t; + +#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT) diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h index c2c30ece8f77..5dfc587c8237 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h @@ -9,20 +9,14 @@ #include <linux/errno.h> #include <linux/types.h> -struct drm_i915_gem_object; +struct drm_gem_object; struct intel_pxp; static inline int intel_pxp_key_check(struct intel_pxp *pxp, - struct drm_i915_gem_object *obj, + struct drm_gem_object *obj, bool assign) { return -ENODEV; } -static inline bool -i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) -{ - return false; -} - #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h new file mode 100644 index 000000000000..05cbfb697b2b --- /dev/null +++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#include "../../../i915/soc/intel_rom.h" diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c index eb40f1cb44f6..a7dbc6554d69 100644 --- a/drivers/gpu/drm/xe/display/ext/i915_irq.c +++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c @@ -7,25 +7,24 @@ #include "i915_reg.h" #include "intel_uncore.h" -void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, - i915_reg_t iir, i915_reg_t ier) +void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs) { - intel_uncore_write(uncore, imr, 0xffffffff); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.imr, 0xffffffff); + intel_uncore_posting_read(uncore, regs.imr); - intel_uncore_write(uncore, ier, 0); + intel_uncore_write(uncore, regs.ier, 0); /* IIR can theoretically queue up two events. Be paranoid. */ - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); - intel_uncore_write(uncore, iir, 0xffffffff); - intel_uncore_posting_read(uncore, iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); + intel_uncore_write(uncore, regs.iir, 0xffffffff); + intel_uncore_posting_read(uncore, regs.iir); } /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
*/ -void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) +void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) { struct xe_device *xe = container_of(uncore, struct xe_device, uncore); u32 val = intel_uncore_read(uncore, reg); @@ -42,16 +41,14 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_posting_read(uncore, reg); } -void gen3_irq_init(struct intel_uncore *uncore, - i915_reg_t imr, u32 imr_val, - i915_reg_t ier, u32 ier_val, - i915_reg_t iir) +void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs, + u32 imr_val, u32 ier_val) { - gen3_assert_iir_is_zero(uncore, iir); + gen2_assert_iir_is_zero(uncore, regs.iir); - intel_uncore_write(uncore, ier, ier_val); - intel_uncore_write(uncore, imr, imr_val); - intel_uncore_posting_read(uncore, imr); + intel_uncore_write(uncore, regs.ier, ier_val); + intel_uncore_write(uncore, regs.imr, imr_val); + intel_uncore_posting_read(uncore, regs.imr); } bool intel_irqs_enabled(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c new file mode 100644 index 000000000000..9f54fad0f1c0 --- /dev/null +++ b/drivers/gpu/drm/xe/display/intel_bo.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2024 Intel Corporation */ + +#include <drm/drm_gem.h> + +#include "xe_bo.h" +#include "intel_bo.h" + +bool intel_bo_is_tiled(struct drm_gem_object *obj) +{ + /* legacy tiling is unused */ + return false; +} + +bool intel_bo_is_userptr(struct drm_gem_object *obj) +{ + /* xe does not have userptr bos */ + return false; +} + +bool intel_bo_is_shmem(struct drm_gem_object *obj) +{ + return false; +} + +bool intel_bo_is_protected(struct drm_gem_object *obj) +{ + return false; +} + +void intel_bo_flush_if_display(struct drm_gem_object *obj) +{ +} + +int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + return drm_gem_prime_mmap(obj, vma); +} + +int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size) +{ + struct xe_bo *bo = gem_to_xe_bo(obj); + struct ttm_bo_kmap_obj map; + void *src; + bool is_iomem; + int ret; + + ret = xe_bo_lock(bo, true); + if (ret) + return ret; + + ret = ttm_bo_kmap(&bo->ttm, offset >> PAGE_SHIFT, 1, &map); + if (ret) + goto out_unlock; + + offset &= ~PAGE_MASK; + src = ttm_kmap_obj_virtual(&map, &is_iomem); + src += offset; + if (is_iomem) + memcpy_fromio(dst, (void __iomem *)src, size); + else + memcpy(dst, src, size); + + ttm_bo_kunmap(&map); +out_unlock: + xe_bo_unlock(bo); + return ret; +} + +struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj) +{ + return NULL; +} + +struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, + struct intel_frontbuffer *front) +{ + return front; +} + +void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) +{ + /* FIXME */ +} diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c index 63ce97cc4cfe..4d209ebc26c2 100644 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.c +++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c @@ -11,8 +11,10 @@ #include "intel_fb_bo.h" #include "xe_bo.h" -void intel_fb_bo_framebuffer_fini(struct xe_bo *bo) +void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj) { + struct xe_bo *bo = gem_to_xe_bo(obj); + if (bo->flags & XE_BO_FLAG_PINNED) { /* Unpin our kernel fb first */ xe_bo_lock(bo, false); @@ -23,9 +25,10 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo 
*bo) } int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct xe_bo *bo, + struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { + struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(bo->ttm.base.dev); int ret; @@ -65,11 +68,11 @@ err: return ret; } -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd) +struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_i915_gem_object *bo; + struct xe_bo *bo; struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]); if (!gem) @@ -78,11 +81,11 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, bo = gem_to_xe_bo(gem); /* Require vram placement or dma-buf import */ if (IS_DGFX(i915) && - !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) && + !xe_bo_can_migrate(bo, XE_PL_VRAM0) && bo->ttm.type != ttm_bo_type_sg) { drm_gem_object_put(gem); return ERR_PTR(-EREMOTE); } - return bo; + return gem; } diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h deleted file mode 100644 index 5d365b925b7a..000000000000 --- a/drivers/gpu/drm/xe/display/intel_fb_bo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2021 Intel Corporation - */ - -#ifndef __INTEL_FB_BO_H__ -#define __INTEL_FB_BO_H__ - -struct drm_file; -struct drm_mode_fb_cmd2; -struct drm_i915_private; -struct intel_framebuffer; -struct xe_bo; - -void intel_fb_bo_framebuffer_fini(struct xe_bo *bo); -int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, - struct xe_bo *bo, - struct drm_mode_fb_cmd2 *mode_cmd); - -struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd); - -#endif diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index 99499d6c0256..ca95fcd098ec 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -6,6 +6,7 @@ #include <drm/drm_fb_helper.h> #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fbdev_fb.h" #include "xe_bo.h" #include "xe_ttm_stolen_mgr.h" @@ -20,7 +21,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, struct drm_device *dev = helper->dev; struct xe_device *xe = to_xe_device(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; - struct drm_i915_gem_object *obj; + struct xe_bo *obj; int size; /* we don't do packed 24bpp */ @@ -64,13 +65,13 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, goto err; } - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd); if (IS_ERR(fb)) { xe_bo_unpin_map_no_vm(obj); goto err; } - drm_gem_object_put(intel_bo_to_drm_bo(obj)); + drm_gem_object_put(&obj->ttm.base); return to_intel_framebuffer(fb); @@ -79,8 +80,9 @@ err: } int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, - struct drm_i915_gem_object *obj, struct i915_vma *vma) + struct drm_gem_object *_obj, struct i915_vma *vma) { + struct xe_bo *obj = gem_to_xe_bo(_obj); struct pci_dev *pdev = to_pci_dev(i915->drm.dev); if (!(obj->flags & XE_BO_FLAG_SYSTEM)) { @@ -100,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info 
XE_WARN_ON(iosys_map_is_null(&obj->vmap)); info->screen_base = obj->vmap.vaddr_iomem; - info->screen_size = intel_bo_to_drm_bo(obj)->size; + info->screen_size = obj->ttm.base.size; return 0; } diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index c6e0c8d77a70..b5502f335f53 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -4,16 +4,16 @@ */ #include "xe_display.h" -#include "regs/xe_regs.h" +#include "regs/xe_irq_regs.h" #include <linux/fb.h> #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> #include <uapi/drm/xe_drm.h> #include "soc/intel_dram.h" -#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */ #include "intel_acpi.h" #include "intel_audio.h" #include "intel_bw.h" @@ -34,7 +34,7 @@ static bool has_display(struct xe_device *xe) { - return HAS_DISPLAY(xe); + return HAS_DISPLAY(&xe->display); } /** @@ -202,12 +202,14 @@ int xe_display_init(struct xe_device *xe) void xe_display_fini(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; intel_hpd_poll_fini(xe); - intel_hdcp_component_fini(xe); + intel_hdcp_component_fini(display); intel_audio_deinit(xe); } @@ -321,7 +323,9 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) * properly. */ intel_power_domains_disable(xe); - intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (!runtime && has_display(xe)) { drm_kms_helper_poll_disable(&xe->drm); intel_display_driver_disable_user_access(xe); @@ -330,7 +334,8 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) xe_display_flush_cleanup_work(xe); - intel_dp_mst_suspend(xe); + if (!runtime) + intel_dp_mst_suspend(xe); intel_hpd_cancel_work(xe); @@ -341,7 +346,7 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold); - intel_dmc_suspend(xe); + intel_dmc_suspend(display); if (runtime && has_display(xe)) intel_hpd_poll_enable(xe); @@ -352,6 +357,36 @@ void xe_display_pm_suspend(struct xe_device *xe) __xe_display_pm_suspend(xe, false); } +void xe_display_pm_shutdown(struct xe_device *xe) +{ + struct intel_display *display = &xe->display; + + if (!xe->info.probe_display) + return; + + intel_power_domains_disable(xe); + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); + if (has_display(xe)) { + drm_kms_helper_poll_disable(&xe->drm); + intel_display_driver_disable_user_access(xe); + intel_display_driver_suspend(xe); + } + + xe_display_flush_cleanup_work(xe); + intel_dp_mst_suspend(xe); + intel_hpd_cancel_work(xe); + + if (has_display(xe)) + intel_display_driver_suspend_access(xe); + + intel_encoder_suspend_all(display); + intel_encoder_shutdown_all(display); + + intel_opregion_suspend(display, PCI_D3cold); + + intel_dmc_suspend(display); +} + void xe_display_pm_runtime_suspend(struct xe_device *xe) { if (!xe->info.probe_display) @@ -376,6 +411,19 @@ void xe_display_pm_suspend_late(struct xe_device *xe) intel_display_power_suspend_late(xe); } +void xe_display_pm_shutdown_late(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + /* + * The only requirement is to reboot with display DC states disabled, + * for now leaving all display power wells in the INIT power domain + * enabled. 
+ */ + intel_power_domains_driver_remove(xe); +} + void xe_display_pm_resume_early(struct xe_device *xe) { if (!xe->info.probe_display) @@ -393,7 +441,7 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) if (!xe->info.probe_display) return; - intel_dmc_resume(xe); + intel_dmc_resume(display); if (has_display(xe)) drm_mode_config_reset(&xe->drm); @@ -405,7 +453,9 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_display_driver_resume_access(xe); /* MST sideband requires HPD interrupts enabled */ - intel_dp_mst_resume(xe); + if (!runtime) + intel_dp_mst_resume(xe); + if (!runtime && has_display(xe)) { intel_display_driver_resume(xe); drm_kms_helper_poll_enable(&xe->drm); @@ -417,7 +467,8 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_opregion_resume(display); - intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); + if (!runtime) + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); intel_power_domains_enable(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index bed55fd26f30..17afa537aee5 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -35,7 +35,9 @@ void xe_display_irq_reset(struct xe_device *xe); void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); void xe_display_pm_suspend(struct xe_device *xe); +void xe_display_pm_shutdown(struct xe_device *xe); void xe_display_pm_suspend_late(struct xe_device *xe); +void xe_display_pm_shutdown_late(struct xe_device *xe); void xe_display_pm_resume_early(struct xe_device *xe); void xe_display_pm_resume(struct xe_device *xe); void xe_display_pm_runtime_suspend(struct xe_device *xe); @@ -66,7 +68,9 @@ static inline void xe_display_irq_reset(struct xe_device *xe) {} static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} static inline void xe_display_pm_suspend(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown(struct xe_device *xe) {} static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} +static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {} static inline void xe_display_pm_resume_early(struct xe_device *xe) {} static inline void xe_display_pm_resume(struct xe_device *xe) {} static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index f99d901a3214..f95375451e2f 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -48,11 +48,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d if (!vma) return false; + /* Set scanout flag for WC mapping */ obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_ALIGN(size), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT); + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); if (IS_ERR(obj)) { kfree(vma); return false; @@ -73,5 +74,9 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { - /* TODO: add xe specific flush_map() for dsb buffer object. */ + /* + * The memory barrier here is to ensure coherency of DSB vs MMIO, + * both for weak ordering archs and discrete cards. 
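+ * Concretely: every CPU store done through the buffer's write-combined
+ * vmap must be visible to the device before the MMIO write that kicks
+ * off DSB execution, which is exactly the ordering the barrier below
+ * provides.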
+ */ + xe_device_wmb(dsb_buf->vma->bo->tile->xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index b58fc4ba2aac..761510ae0690 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -79,12 +79,14 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_tile *tile0 = xe_device_get_root_tile(xe); struct xe_ggtt *ggtt = tile0->mem.ggtt; - struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj), *dpt; u32 dpt_size, size = bo->ttm.base.size; if (view->type == I915_GTT_VIEW_NORMAL) @@ -98,23 +100,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_PAGE_SIZE); if (IS_DGFX(xe)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM0 | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM0 | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); else - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) - dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE); + dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + physical_alignment); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -183,9 +191,11 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - struct i915_vma *vma) + struct i915_vma *vma, + u64 physical_alignment) { - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; u32 align; @@ -264,12 +274,14 @@ out: } static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, - const struct i915_gtt_view *view) + const struct i915_gtt_view *view, + u64 physical_alignment) { struct drm_device *dev = fb->base.dev; struct xe_device *xe = to_xe_device(dev); struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); - struct xe_bo *bo = intel_fb_obj(&fb->base); + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + struct xe_bo *bo = gem_to_xe_bo(obj); int ret; if (!vma) @@ -312,9 +324,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, vma->bo = bo; if (intel_fb_uses_dpt(&fb->base)) - ret = __xe_pin_fb_vma_dpt(fb, view, vma); + ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment); else - ret = __xe_pin_fb_vma_ggtt(fb, view, vma); + ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment); if (ret) goto err_unpin; @@ -355,7 +367,7 
@@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, { *out_flags = 0; - return __xe_pin_fb_vma(to_intel_framebuffer(fb), view); + return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment); } void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags) @@ -366,13 +378,18 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags) int intel_plane_pin_fb(struct intel_plane_state *plane_state) { struct drm_framebuffer *fb = plane_state->hw.fb; - struct xe_bo *bo = intel_fb_obj(fb); + struct drm_gem_object *obj = intel_fb_bo(fb); + struct xe_bo *bo = gem_to_xe_bo(obj); struct i915_vma *vma; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + u64 phys_alignment = plane->min_alignment(plane, fb, 0); /* We reject creating !SCANOUT fb's, so this is weird.. */ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT)); - vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt); + vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment); + if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 6619a40aed15..7c02323e9531 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -30,26 +30,29 @@ struct intel_hdcp_gsc_message { #define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header) -bool intel_hdcp_gsc_cs_required(struct xe_device *xe) +bool intel_hdcp_gsc_cs_required(struct intel_display *display) { - return DISPLAY_VER(xe) >= 14; + return DISPLAY_VER(display) >= 14; } -bool intel_hdcp_gsc_check_status(struct xe_device *xe) +bool intel_hdcp_gsc_check_status(struct intel_display *display) { + struct xe_device *xe = to_xe_device(display->drm); struct xe_tile *tile = xe_device_get_root_tile(xe); struct xe_gt *gt = tile->media_gt; struct xe_gsc *gsc = >->uc.gsc; bool ret = true; + unsigned int fw_ref; - if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) { + if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) { drm_dbg_kms(&xe->drm, "GSC Components not ready for HDCP2.x\n"); return false; } xe_pm_runtime_get(xe); - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) { drm_dbg_kms(&xe->drm, "failed to get forcewake to check proxy status\n"); ret = false; @@ -59,16 +62,17 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe) if (!xe_gsc_proxy_init_done(gsc)) ret = false; - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: xe_pm_runtime_put(xe); return ret; } /*This function helps allocate memory for the command that we will send to gsc cs */ -static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, +static int intel_hdcp_gsc_initialize_message(struct intel_display *display, struct intel_hdcp_gsc_message *hdcp_message) { + struct xe_device *xe = to_xe_device(display->drm); struct xe_bo *bo = NULL; u64 cmd_in, cmd_out; int ret = 0; @@ -80,7 +84,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, XE_BO_FLAG_GGTT); if (IS_ERR(bo)) { - drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n"); + drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n"); ret = PTR_ERR(bo); goto out; } @@ -96,7 +100,7 @@ out: return ret; } -static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe) +static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display) { struct 
intel_hdcp_gsc_message *hdcp_message; int ret; @@ -110,14 +114,14 @@ static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe) * NOTE: No need to lock the comp mutex here as it is already * going to be taken before this function called */ - ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message); + ret = intel_hdcp_gsc_initialize_message(display, hdcp_message); if (ret) { - drm_err(&xe->drm, "Could not initialize hdcp_message\n"); + drm_err(display->drm, "Could not initialize hdcp_message\n"); kfree(hdcp_message); return ret; } - xe->display.hdcp.hdcp_message = hdcp_message; + display->hdcp.hdcp_message = hdcp_message; return ret; } @@ -137,7 +141,7 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = { .close_hdcp_session = intel_hdcp_gsc_close_session, }; -int intel_hdcp_gsc_init(struct xe_device *xe) +int intel_hdcp_gsc_init(struct intel_display *display) { struct i915_hdcp_arbiter *data; int ret; @@ -146,33 +150,33 @@ int intel_hdcp_gsc_init(struct xe_device *xe) if (!data) return -ENOMEM; - mutex_lock(&xe->display.hdcp.hdcp_mutex); - xe->display.hdcp.arbiter = data; - xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev; - xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops; - ret = intel_hdcp_gsc_hdcp2_init(xe); + mutex_lock(&display->hdcp.hdcp_mutex); + display->hdcp.arbiter = data; + display->hdcp.arbiter->hdcp_dev = display->drm->dev; + display->hdcp.arbiter->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(display); if (ret) kfree(data); - mutex_unlock(&xe->display.hdcp.hdcp_mutex); + mutex_unlock(&display->hdcp.hdcp_mutex); return ret; } -void intel_hdcp_gsc_fini(struct xe_device *xe) +void intel_hdcp_gsc_fini(struct intel_display *display) { struct intel_hdcp_gsc_message *hdcp_message = - xe->display.hdcp.hdcp_message; - struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; + display->hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = display->hdcp.arbiter; if (hdcp_message) { xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); kfree(hdcp_message); - xe->display.hdcp.hdcp_message = NULL; + display->hdcp.hdcp_message = NULL; } kfree(arb); - xe->display.hdcp.arbiter = NULL; + display->hdcp.arbiter = NULL; } static int xe_gsc_send_sync(struct xe_device *xe, diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index a50ab9eae40a..8c113463a3d5 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -170,7 +170,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; if (intel_framebuffer_init(to_intel_framebuffer(fb), - bo, &mode_cmd)) { + &bo->ttm.base, &mode_cmd)) { drm_dbg_kms(&xe->drm, "intel fb init failed\n"); goto err_bo; } @@ -248,7 +248,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, * the lookup of sysmem scratch pages. 
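
The HDCP/GSC rework a few hunks up follows a conversion pattern that recurs throughout this series: display entry points take struct intel_display and reach back to the xe_device only for driver-level services. A minimal sketch of that shape, built only from helpers visible in the hunks above (the DISPLAY_VER() check stands in for real work):

	static bool example_display_query(struct intel_display *display)
	{
		struct xe_device *xe = to_xe_device(display->drm);
		bool supported;

		/* Driver services (runtime PM etc.) still come from the xe_device... */
		xe_pm_runtime_get(xe);
		/* ...while display state is reached through the display pointer. */
		supported = DISPLAY_VER(display) >= 14;
		xe_pm_runtime_put(xe);

		return supported;
	}
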
*/ plane->check_plane(crtc_state, plane_state); - plane->async_flip(plane, crtc_state, plane_state, true); + plane->async_flip(NULL, plane, crtc_state, plane_state, true); return; nofb: diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 81b71903675e..7c78496e6213 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -186,6 +186,7 @@ #define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10) #define IECPUNIT_CLKGATE_DIS REG_BIT(22) +#define RAMDFTUNIT_CLKGATE_DIS REG_BIT(9) #define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18) #define ALNUNIT_CLKGATE_DIS REG_BIT(13) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index bd604b9f08e4..0c9e4b2fafab 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -286,6 +286,9 @@ #define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) #define LTCDD_CLKGATE_DIS REG_BIT(10) +#define UNSLCGCTL9454 XE_REG(0x9454) +#define LSCFE_CLKGATE_DIS REG_BIT(4) + #define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define L3_CLKGATE_DIS REG_BIT(16) @@ -344,6 +347,14 @@ #define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0) #define FORCEWAKE_RENDER XE_REG(0xa278) + +#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0) +#define MEDIA_SLICE3_AWAKE_STATUS REG_BIT(4) +#define MEDIA_SLICE2_AWAKE_STATUS REG_BIT(3) +#define MEDIA_SLICE1_AWAKE_STATUS REG_BIT(2) +#define RENDER_AWAKE_STATUS REG_BIT(1) +#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0) + #define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4) #define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4) #define FORCEWAKE_GSC XE_REG(0xa618) @@ -556,62 +567,6 @@ #define GT_PERF_STATUS XE_REG(0x1381b4) #define VOLTAGE_MASK REG_GENMASK(10, 0) -/* - * Note: Interrupt registers 1900xx are VF accessible only until version 12.50. - * On newer platforms, VFs are using memory-based interrupts instead. - * However, for simplicity we keep this XE_REG_OPTION_VF tag intact. 
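
The interrupt definitions removed here move verbatim into the new regs/xe_irq_regs.h added below. For orientation, a hypothetical decode helper assembled only from these macros and the xe_mmio accessor used elsewhere in this series (the bank parameter and pr_debug() reporting are illustrative):

	static void decode_intr_identity(struct xe_mmio *mmio, int bank)
	{
		u32 ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));

		if (!(ident & INTR_DATA_VALID))
			return;

		/* Class, instance and event live in bits 18:16, 25:20 and 15:0. */
		pr_debug("engine class %u instance %u intr 0x%x\n",
			 (u32)INTR_ENGINE_CLASS(ident),
			 (u32)INTR_ENGINE_INSTANCE(ident),
			 (u32)INTR_ENGINE_INTR(ident));
	}
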
- */ - -#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF) -#define INTR_GSC REG_BIT(31) -#define INTR_GUC REG_BIT(25) -#define INTR_MGUC REG_BIT(24) -#define INTR_BCS8 REG_BIT(23) -#define INTR_BCS(x) REG_BIT(15 - (x)) -#define INTR_CCS(x) REG_BIT(4 + (x)) -#define INTR_RCS0 REG_BIT(0) -#define INTR_VECS(x) REG_BIT(31 - (x)) -#define INTR_VCS(x) REG_BIT(x) - -#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF) -#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF) -#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF) -#define ENGINE1_MASK REG_GENMASK(31, 16) -#define ENGINE0_MASK REG_GENMASK(15, 0) -#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF) -#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF) -#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF) - -#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF) -#define INTR_DATA_VALID REG_BIT(31) -#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x) -#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x) -#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x) -#define OTHER_GUC_INSTANCE 0 -#define OTHER_GSC_HECI2_INSTANCE 3 -#define OTHER_GSC_INSTANCE 6 - -#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF) -#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF) -#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF) -#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF) -#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF) -#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF) -#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) -#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF) -#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF) -#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF) -#define CCS0_CCS1_INTR_MASK XE_REG(0x190100) -#define CCS2_CCS3_INTR_MASK XE_REG(0x190104) -#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110) -#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114) -#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118) -#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c) -#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) -#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8) -#define GSC_ER_COMPLETE REG_BIT(5) -#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4) -#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) -#define GT_RENDER_USER_INTERRUPT REG_BIT(0) +#define SFC_DONE(n) XE_REG(0x1cc000 + (n) * 0x1000) #endif diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h index a5fd14307f94..2118f7dec287 100644 --- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h @@ -84,6 +84,8 @@ #define HUC_LOADING_AGENT_GUC REG_BIT(1) #define GUC_WOPCM_OFFSET_VALID REG_BIT(0) #define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4) +#define GUC_PMTIMESTAMP_LO XE_REG(0xc3e8) +#define GUC_PMTIMESTAMP_HI XE_REG(0xc3ec) #define GUC_SEND_INTERRUPT XE_REG(0xc4c8) #define GUC_SEND_TRIGGER REG_BIT(0) diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h new file mode 100644 index 000000000000..1776b3f78ccb --- /dev/null +++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ +#ifndef _XE_IRQ_REGS_H_ +#define _XE_IRQ_REGS_H_ + +#include "regs/xe_reg_defs.h" + +#define PCU_IRQ_OFFSET 0x444e0 +#define GU_MISC_IRQ_OFFSET 0x444f0 +#define GU_MISC_GSE 
REG_BIT(27) + +#define DG1_MSTR_TILE_INTR XE_REG(0x190008) +#define DG1_MSTR_IRQ REG_BIT(31) +#define DG1_MSTR_TILE(t) REG_BIT(t) + +#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF) +#define MASTER_IRQ REG_BIT(31) +#define GU_MISC_IRQ REG_BIT(29) +#define DISPLAY_IRQ REG_BIT(16) +#define GT_DW_IRQ(x) REG_BIT(x) + +/* + * Note: Interrupt registers 1900xx are VF accessible only until version 12.50. + * On newer platforms, VFs are using memory-based interrupts instead. + * However, for simplicity we keep this XE_REG_OPTION_VF tag intact. + */ + +#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF) +#define INTR_GSC REG_BIT(31) +#define INTR_GUC REG_BIT(25) +#define INTR_MGUC REG_BIT(24) +#define INTR_BCS8 REG_BIT(23) +#define INTR_BCS(x) REG_BIT(15 - (x)) +#define INTR_CCS(x) REG_BIT(4 + (x)) +#define INTR_RCS0 REG_BIT(0) +#define INTR_VECS(x) REG_BIT(31 - (x)) +#define INTR_VCS(x) REG_BIT(x) + +#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF) +#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF) +#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF) +#define ENGINE1_MASK REG_GENMASK(31, 16) +#define ENGINE0_MASK REG_GENMASK(15, 0) +#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF) +#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF) +#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF) + +#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF) +#define INTR_DATA_VALID REG_BIT(31) +#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x) +#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x) +#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x) +#define OTHER_GUC_INSTANCE 0 +#define OTHER_GSC_HECI2_INSTANCE 3 +#define OTHER_GSC_INSTANCE 6 + +#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF) +#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF) +#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF) +#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF) +#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF) +#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF) +#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4) +#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF) +#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF) +#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF) +#define CCS0_CCS1_INTR_MASK XE_REG(0x190100) +#define CCS2_CCS3_INTR_MASK XE_REG(0x190104) +#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110) +#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114) +#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118) +#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c) +#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) +#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8) +#define GSC_ER_COMPLETE REG_BIT(5) +#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4) +#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) +#define GT_RENDER_USER_INTERRUPT REG_BIT(0) + +#endif diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h index 23f7dc5bbe99..51fd40ffafcb 100644 --- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h +++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h @@ -128,7 +128,7 @@ struct xe_reg_mcr { * options. */ #define XE_REG_MCR(r_, ...) 
((const struct xe_reg_mcr){ \ - .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ + .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ }) static inline bool xe_reg_is_valid(struct xe_reg r) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index dfa869f0dddd..3293172b0128 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -11,10 +11,6 @@ #define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12) #define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0) -#define PCU_IRQ_OFFSET 0x444e0 -#define GU_MISC_IRQ_OFFSET 0x444f0 -#define GU_MISC_GSE REG_BIT(27) - #define GU_CNTL_PROTECTED XE_REG(0x10100C) #define DRIVERINT_FLR_DIS REG_BIT(31) @@ -57,16 +53,6 @@ #define MTL_MPE_FREQUENCY XE_REG(0x13802c) #define MTL_RPE_MASK REG_GENMASK(8, 0) -#define DG1_MSTR_TILE_INTR XE_REG(0x190008) -#define DG1_MSTR_IRQ REG_BIT(31) -#define DG1_MSTR_TILE(t) REG_BIT(t) - -#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF) -#define MASTER_IRQ REG_BIT(31) -#define GU_MISC_IRQ REG_BIT(29) -#define DISPLAY_IRQ REG_BIT(16) -#define GT_DW_IRQ(x) REG_BIT(x) - #define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF) #define VF_CAP REG_BIT(0) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 8dac069483e8..3e0ae40ebbd2 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -6,6 +6,13 @@ #include <kunit/test.h> #include <kunit/visibility.h> +#include <linux/iosys-map.h> +#include <linux/math64.h> +#include <linux/prandom.h> +#include <linux/swap.h> + +#include <uapi/linux/sysinfo.h> + #include "tests/xe_kunit_helpers.h" #include "tests/xe_pci_test.h" #include "tests/xe_test.h" @@ -358,9 +365,242 @@ static void xe_bo_evict_kunit(struct kunit *test) evict_test_run_device(xe); } +struct xe_bo_link { + struct list_head link; + struct xe_bo *bo; + u32 val; +}; + +#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M) + +static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state, + struct xe_bo_link *link) +{ + struct iosys_map map; + int ret = ttm_bo_vmap(&bo->ttm, &map); + size_t __maybe_unused i; + + if (ret) + return ret; + + for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) { + u32 val = prandom_u32_state(state); + + iosys_map_wr(&map, i, u32, val); + if (i == 0) + link->val = val; + } + + ttm_bo_vunmap(&bo->ttm, &map); + return 0; +} + +static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo, + unsigned int bo_nr, struct rnd_state *state, + struct xe_bo_link *link) +{ + struct iosys_map map; + int ret = ttm_bo_vmap(&bo->ttm, &map); + size_t i; + bool failed = false; + + if (ret) { + KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr); + return true; + } + + for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) { + u32 val = prandom_u32_state(state); + + if (iosys_map_rd(&map, i, u32) != val) { + KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx", + bo_nr, (unsigned long long)i); + kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n", + (unsigned int)iosys_map_rd(&map, i, u32), val); + if (i == 0 && val != link->val) + kunit_info(test, "Looks like PRNG is out of sync.\n"); + failed = true; + break; + } + } + + ttm_bo_vunmap(&bo->ttm, &map); + + return failed; +} + +/* + * Try to create system bos corresponding to twice the amount + * of available system memory to test shrinker functionality. 
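+ * (Worked example of the sizing below: with 16 GiB of free RAM and
+ * 8 GiB of swap, to_alloc = 32 GiB and ram_and_swap = 24 GiB, so the
+ * 8 GiB shortfall plus a 20% margin, 9.6 GiB in total, is created as
+ * purgeable bos.)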
+ * If no swap space is available to accommodate the + * memory overcommit, mark bos purgeable. + */ +static int shrink_test_run_device(struct xe_device *xe) +{ + struct kunit *test = kunit_get_current_test(); + LIST_HEAD(bos); + struct xe_bo_link *link, *next; + struct sysinfo si; + u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit; + unsigned int interrupted = 0, successful = 0, count = 0; + struct rnd_state prng; + u64 rand_seed; + bool failed = false; + + rand_seed = get_random_u64(); + prandom_seed_state(&prng, rand_seed); + kunit_info(test, "Random seed is 0x%016llx.\n", + (unsigned long long)rand_seed); + + /* Skip if execution time is expected to be too long. */ + + limit = SZ_32G; + /* IGFX with flat CCS needs to copy when swapping / shrinking */ + if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe)) + limit = SZ_16G; + + si_meminfo(&si); + ram = (size_t)si.freeram * si.mem_unit; + if (ram > limit) { + kunit_skip(test, "Too long expected execution time.\n"); + return 0; + } + to_alloc = ram * 2; + + ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE; + if (to_alloc > ram_and_swap) + purgeable = to_alloc - ram_and_swap; + purgeable += div64_u64(purgeable, 5); + + kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n", + (unsigned long)ram); + for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) { + struct xe_bo *bo; + unsigned int mem_type; + struct xe_ttm_tt *xe_tt; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + KUNIT_FAIL(test, "Unexpected link allocation failure\n"); + failed = true; + break; + } + + INIT_LIST_HEAD(&link->link); + + /* We can create bos using WC caching here. But it is slower. */ + bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE, + DRM_XE_GEM_CPU_CACHING_WB, + XE_BO_FLAG_SYSTEM); + if (IS_ERR(bo)) { + if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) && + bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS)) + KUNIT_FAIL(test, "Error creating bo: %pe\n", bo); + kfree(link); + failed = true; + break; + } + xe_bo_lock(bo, false); + xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm); + + /* + * Allocate purgeable bos first, because if we do it the + * other way around, they may not be subject to swapping... + */ + if (alloced < purgeable) { + xe_tt->purgeable = true; + bo->ttm.priority = 0; + } else { + int ret = shrink_test_fill_random(bo, &prng, link); + + if (ret) { + xe_bo_unlock(bo); + xe_bo_put(bo); + KUNIT_FAIL(test, "Error filling bo with random data: %pe\n", + ERR_PTR(ret)); + kfree(link); + failed = true; + break; + } + } + + mem_type = bo->ttm.resource->mem_type; + xe_bo_unlock(bo); + link->bo = bo; + list_add_tail(&link->link, &bos); + + if (mem_type != XE_PL_TT) { + KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n", + bo->ttm.resource->mem_type); + failed = true; + } + cond_resched(); + if (signal_pending(current)) + break; + } + + /* + * Read back and destroy bos. Reset the pseudo-random seed to get an + * identical pseudo-random number sequence for readback. + */ + prandom_seed_state(&prng, rand_seed); + list_for_each_entry_safe(link, next, &bos, link) { + static struct ttm_operation_ctx ctx = {.interruptible = true}; + struct xe_bo *bo = link->bo; + struct xe_ttm_tt *xe_tt; + int ret; + + count++; + if (!signal_pending(current) && !failed) { + bool purgeable, intr = false; + + xe_bo_lock(bo, NULL); + + /* xe_tt->purgeable is cleared on validate. 
*/ + xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm); + purgeable = xe_tt->purgeable; + do { + ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx); + if (ret == -EINTR) + intr = true; + } while (ret == -EINTR && !signal_pending(current)); + + if (!ret && !purgeable) + failed = shrink_test_verify(test, bo, count, &prng, link); + + xe_bo_unlock(bo); + if (ret) { + KUNIT_FAIL(test, "Validation failed: %pe\n", + ERR_PTR(ret)); + failed = true; + } else if (intr) { + interrupted++; + } else { + successful++; + } + } + xe_bo_put(link->bo); + list_del(&link->link); + kfree(link); + } + kunit_info(test, "Readbacks interrupted: %u successful: %u\n", + interrupted, successful); + + return 0; +} + +static void xe_bo_shrink_kunit(struct kunit *test) +{ + struct xe_device *xe = test->priv; + + shrink_test_run_device(xe); +} + static struct kunit_case xe_bo_tests[] = { KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param), KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param, + {.speed = KUNIT_SPEED_SLOW}), {} }; diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 79be73b4a02b..6f9b7a266b41 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -43,19 +43,18 @@ static void read_l3cc_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 l3cc, l3cc_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1)); + reg_val = xe_mmio_read32(>->mmio, XELP_LNCFCMOCS(i >> 1)); mocs_dbg(gt, "reg_val=0x%x\n", reg_val); } else { @@ -72,7 +71,7 @@ static void read_l3cc_table(struct xe_gt *gt, KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc, "l3cc idx=%u has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void read_mocs_table(struct xe_gt *gt, @@ -80,21 +79,20 @@ static void read_mocs_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 mocs, mocs_expected; - unsigned int i; + unsigned int fw_ref, i; u32 reg_val; - u32 ret; KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index, "Unused entries index should have been defined\n"); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(>->mmio, XELP_GLOBAL_MOCS(i)); mocs_expected = get_entry_control(info, i); mocs = reg_val; @@ -106,7 +104,7 @@ static void read_mocs_table(struct xe_gt *gt, "mocs reg 0x%x has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static int mocs_kernel_test_run_device(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_assert.h 
b/drivers/gpu/drm/xe/xe_assert.h index e22bbf57fca7..04d6b95c6d87 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -10,7 +10,7 @@
 #include <drm/drm_print.h>
-#include "xe_device_types.h"
+#include "xe_gt_types.h"
 #include "xe_step.h"
 /** */
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 2a093540354e..ae6b337cdc54 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -283,6 +283,8 @@ struct xe_ttm_tt {
 struct device *dev;
 struct sg_table sgt;
 struct sg_table *sg;
+ /** @purgeable: Whether the content of the pages of @ttm is purgeable. */
+ bool purgeable;
 };
 static int xe_tt_map_sg(struct ttm_tt *tt)
@@ -468,7 +470,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
 mem->bus.offset += vram->io_start;
 mem->bus.is_iomem = true;
-#if !defined(CONFIG_X86)
+#if !IS_ENABLED(CONFIG_X86)
 mem->bus.caching = ttm_write_combined;
 #endif
 return 0;
@@ -761,7 +763,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 if (xe_rpm_reclaim_safe(xe)) {
 /*
 * We might be called through swapout in the validation path of
- * another TTM device, so unconditionally acquire rpm here.
+ * another TTM device, so acquire rpm here.
 */
 xe_pm_runtime_get(xe);
 } else {
@@ -901,7 +903,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 }
 }
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
 if (ret)
 goto err_res_free;
@@ -961,7 +963,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 if (ret)
 return ret;
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
 if (ret)
 goto err_res_free;
@@ -1089,6 +1091,33 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
 }
 }
+static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
+{
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+
+ if (ttm_bo->ttm) {
+ struct ttm_placement place = {};
+ int ret = ttm_bo_validate(ttm_bo, &place, ctx);
+
+ drm_WARN_ON(&xe->drm, ret);
+ }
+}
+
+static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false
+ };
+
+ if (ttm_bo->ttm) {
+ struct xe_ttm_tt *xe_tt =
+ container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_ttm_bo_purge(ttm_bo, &ctx);
+ }
+}
+
 const struct ttm_device_funcs xe_ttm_funcs = {
 .ttm_tt_create = xe_ttm_tt_create,
 .ttm_tt_populate = xe_ttm_tt_populate,
@@ -1101,6 +1130,7 @@ const struct ttm_device_funcs xe_ttm_funcs = {
 .release_notify = xe_ttm_bo_release_notify,
 .eviction_valuable = ttm_bo_eviction_valuable,
 .delete_mem_notify = xe_ttm_bo_delete_mem_notify,
+ .swap_notify = xe_ttm_bo_swap_notify,
 };
 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
@@ -1431,7 +1461,8 @@ static struct xe_bo *
 __xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
 struct xe_vm *vm, size_t size, u64 start, u64 end,
- u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+ u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+ u64 alignment)
 {
 struct xe_bo *bo = NULL;
 int err;
@@ -1460,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe,
 if (IS_ERR(bo))
 return bo;
+ bo->min_align = alignment;
+
 /*
 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
 * to ensure the shared resv doesn't disappear under the bo, the bo
@@ -1500,16 +1533,18 @@ struct xe_bo *
 xe_bo_create_locked_range(struct xe_device *xe,
 struct xe_tile *tile, struct xe_vm *vm,
 size_t size, u64 start,
u64 end, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, u64 alignment) { - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, + flags, alignment); } struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags) { - return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, + flags, 0); } struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, @@ -1519,7 +1554,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, { struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER); + flags | XE_BO_FLAG_USER, 0); if (!IS_ERR(bo)) xe_bo_unlock_vm_held(bo); @@ -1543,6 +1578,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile size_t size, u64 offset, enum ttm_bo_type type, u32 flags) { + return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, + type, flags, 0); +} + +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment) +{ struct xe_bo *bo; int err; u64 start = offset == ~0ull ? 0 : offset; @@ -1553,7 +1599,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + alignment); if (IS_ERR(bo)) return bo; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 6e4be52306df..7fa44a0138b0 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -77,7 +77,7 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags); + enum ttm_bo_type type, u32 flags, u64 alignment); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags); @@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags); +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment); struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, enum ttm_bo_type type, u32 flags); @@ -312,8 +318,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev) return round_down(max / 2, PAGE_SIZE); } -#define i915_gem_object_flush_if_display(obj) ((void)(obj)) - #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) /** * xe_bo_is_mem_type - Whether the bo currently resides in the given diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 2ed558ac2264..13c6d8a69e91 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -76,9 +76,11 @@ struct xe_bo { /** @vram_userfault_link: Link into 
@mem_access.vram_userfault.list */ struct list_head vram_userfault_link; -}; -#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base) -#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev) + /** @min_align: minimum alignment needed for this BO if different + * from default + */ + u64 min_align; +}; #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index fe4319eb13fd..492b4877433f 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -90,13 +90,32 @@ static int forcewake_open(struct inode *inode, struct file *file) { struct xe_device *xe = inode->i_private; struct xe_gt *gt; - u8 id; + u8 id, last_gt; + unsigned int fw_ref; xe_pm_runtime_get(xe); - for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + for_each_gt(gt, xe, id) { + last_gt = id; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + goto err_fw_get; + } return 0; + +err_fw_get: + for_each_gt(gt, xe, id) { + if (id < last_gt) + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + else if (id == last_gt) + xe_force_wake_put(gt_to_fw(gt), fw_ref); + else + break; + } + + xe_pm_runtime_put(xe); + return -ETIMEDOUT; } static int forcewake_release(struct inode *inode, struct file *file) @@ -106,7 +125,7 @@ static int forcewake_release(struct inode *inode, struct file *file) u8 id; for_each_gt(gt, xe, id) - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(xe); return 0; diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index bdb76e834e4c..d2679c5d976b 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -6,6 +6,7 @@ #include "xe_devcoredump.h" #include "xe_devcoredump_types.h" +#include <linux/ascii85.h> #include <linux/devcoredump.h> #include <generated/utsrelease.h> @@ -16,9 +17,12 @@ #include "xe_force_wake.h" #include "xe_gt.h" #include "xe_gt_printk.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" +#include "xe_guc_log.h" #include "xe_guc_submit.h" #include "xe_hw_engine.h" +#include "xe_module.h" #include "xe_sched_job.h" #include "xe_vm.h" @@ -85,9 +89,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, p = drm_coredump_printer(&iter); - drm_printf(&p, "**** Xe Device Coredump ****\n"); - drm_printf(&p, "kernel: " UTS_RELEASE "\n"); - drm_printf(&p, "module: " KBUILD_MODNAME "\n"); + drm_puts(&p, "**** Xe Device Coredump ****\n"); + drm_puts(&p, "kernel: " UTS_RELEASE "\n"); + drm_puts(&p, "module: " KBUILD_MODNAME "\n"); ts = ktime_to_timespec64(ss->snapshot_time); drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); @@ -96,20 +100,27 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, drm_printf(&p, "Process: %s\n", ss->process_name); xe_device_snapshot_print(xe, &p); - drm_printf(&p, "\n**** GuC CT ****\n"); - xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p); - xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p); + drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id); + drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id); - drm_printf(&p, "\n**** Job ****\n"); - xe_sched_job_snapshot_print(coredump->snapshot.job, &p); + drm_puts(&p, "\n**** GuC Log ****\n"); + xe_guc_log_snapshot_print(ss->guc.log, &p); + drm_puts(&p, "\n**** GuC CT ****\n"); + xe_guc_ct_snapshot_print(ss->guc.ct, &p); - drm_printf(&p, "\n**** HW 
Engines ****\n"); + drm_puts(&p, "\n**** Contexts ****\n"); + xe_guc_exec_queue_snapshot_print(ss->ge, &p); + + drm_puts(&p, "\n**** Job ****\n"); + xe_sched_job_snapshot_print(ss->job, &p); + + drm_puts(&p, "\n**** HW Engines ****\n"); for (i = 0; i < XE_NUM_HW_ENGINES; i++) - if (coredump->snapshot.hwe[i]) - xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i], - &p); - drm_printf(&p, "\n**** VM state ****\n"); - xe_vm_snapshot_print(coredump->snapshot.vm, &p); + if (ss->hwe[i]) + xe_engine_snapshot_print(ss->hwe[i], &p); + + drm_puts(&p, "\n**** VM state ****\n"); + xe_vm_snapshot_print(ss->vm, &p); return count - iter.remain; } @@ -118,8 +129,14 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) { int i; - xe_guc_ct_snapshot_free(ss->ct); - ss->ct = NULL; + xe_guc_log_snapshot_free(ss->guc.log); + ss->guc.log = NULL; + + xe_guc_ct_snapshot_free(ss->guc.ct); + ss->guc.ct = NULL; + + xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc); + ss->matched_node = NULL; xe_guc_exec_queue_snapshot_free(ss->ge); ss->ge = NULL; @@ -141,13 +158,15 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) { struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); + unsigned int fw_ref; /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); xe_vm_snapshot_capture_delayed(ss->vm); xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); /* Calculate devcoredump size */ ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); @@ -204,6 +223,7 @@ static void xe_devcoredump_free(void *data) /* To prevent stale data on next snapshot, clear everything */ memset(&coredump->snapshot, 0, sizeof(coredump->snapshot)); coredump->captured = false; + coredump->job = NULL; drm_info(&coredump_to_xe(coredump)->drm, "Xe device coredump has been deleted.\n"); } @@ -214,14 +234,13 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, struct xe_devcoredump_snapshot *ss = &coredump->snapshot; struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); - struct xe_hw_engine *hwe; - enum xe_hw_engine_id id; u32 adj_logical_mask = q->logical_mask; u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - int i; + unsigned int fw_ref; bool cookie; + int i; ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); @@ -231,6 +250,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, strscpy(ss->process_name, process_name); ss->gt = q->gt; + coredump->job = job; INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work); cookie = dma_fence_begin_signalling(); @@ -244,26 +264,19 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, } /* keep going if fw fails as we still want to save the memory and SW data */ - if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); - coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true); - coredump->snapshot.ge = 
xe_guc_exec_queue_snapshot_capture(q);
- coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
- coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
+ ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
+ ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
+ ss->ge = xe_guc_exec_queue_snapshot_capture(q);
+ ss->job = xe_sched_job_snapshot_capture(job);
+ ss->vm = xe_vm_snapshot_capture(q->vm);
- for_each_hw_engine(hwe, q->gt, id) {
- if (hwe->class != q->hwe->class ||
- !(BIT(hwe->logical_instance) & adj_logical_mask)) {
- coredump->snapshot.hwe[id] = NULL;
- continue;
- }
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
- }
+ xe_engine_snapshot_capture_for_job(job);
 queue_work(system_unbound_wq, &ss->work);
- xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
 dma_fence_end_signalling(cookie);
 }
@@ -310,3 +323,89 @@ int xe_devcoredump_init(struct xe_device *xe)
 }
 #endif
+
+/**
+ * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+ *
+ * The output is split into multiple lines because some print targets, e.g. dmesg,
+ * cannot handle arbitrarily long lines. Note also that printing to dmesg in
+ * piecemeal fashion is not possible, each separate call to drm_puts() has a
+ * line-feed automatically added! Therefore, the entire output line must be
+ * constructed in a local buffer first, then printed in one atomic output call.
+ *
+ * There is also a scheduler yield call to prevent the 'task has been stuck for
+ * 120s' kernel hang check feature from firing when printing to a slow target
+ * such as dmesg over a serial port.
+ *
+ * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
+ *
+ * @p: the printer object to output to
+ * @prefix: optional prefix to add to output string
+ * @blob: the Binary Large OBject to dump out
+ * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+ * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+ */
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ const void *blob, size_t offset, size_t size)
+{
+ const u32 *blob32 = (const u32 *)blob;
+ char buff[ASCII85_BUFSZ], *line_buff;
+ size_t line_pos = 0;
+
+#define DMESG_MAX_LINE_LEN 800
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+
+ if (size & 3)
+ drm_printf(p, "Size not word aligned: %zu", size);
+ if (offset & 3)
+ drm_printf(p, "Offset not word aligned: %zu", offset);
+
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(line_buff)) {
+ drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
+ return;
+ }
+
+ blob32 += offset / sizeof(*blob32);
+ size /= sizeof(*blob32);
+
+ if (prefix) {
+ strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
+ line_pos = strlen(line_buff);
+
+ line_buff[line_pos++] = ':';
+ line_buff[line_pos++] = ' ';
+ }
+
+ while (size--) {
+ u32 val = *(blob32++);
+
+ strscpy(line_buff + line_pos, ascii85_encode(val, buff),
+ DMESG_MAX_LINE_LEN - line_pos);
+ line_pos += strlen(line_buff + line_pos);
+
+ if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+ line_buff[line_pos++] = '\n';
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+
+ line_pos = 0;
+
+ /* Prevent 'stuck thread' time out errors */
+ cond_resched();
+ }
+ }
+
+ if (line_pos) {
+ line_buff[line_pos++] = '\n';
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+ }
+
+ kfree(line_buff);
+
+#undef MIN_SPACE
+#undef DMESG_MAX_LINE_LEN
+} diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h index e2fa65ce0932..a4eebc285fc8 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.h +++ b/drivers/gpu/drm/xe/xe_devcoredump.h @@ -6,6 +6,9 @@ #ifndef _XE_DEVCOREDUMP_H_ #define _XE_DEVCOREDUMP_H_ +#include <linux/types.h> + +struct drm_printer; struct xe_device; struct xe_sched_job; @@ -23,4 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe) } #endif +void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, + const void *blob, size_t offset, size_t size); + #endif diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h index 440d05d77a5a..3703ddea1252 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump_types.h +++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h @@ -34,16 +34,27 @@ struct xe_devcoredump_snapshot { /** @work: Workqueue for deferred capture outside of signaling context */ struct work_struct work; - /* GuC snapshots */ - /** @ct: GuC CT snapshot */ - struct xe_guc_ct_snapshot *ct; - /** @ge: Guc Engine snapshot */ + /** @guc: GuC snapshots */ + struct { + /** @guc.ct: GuC CT snapshot */ + struct xe_guc_ct_snapshot *ct; + /** @guc.log: GuC log snapshot */ + struct xe_guc_log_snapshot *log; + } guc; + + /** @ge: GuC Submission Engine snapshot */ struct xe_guc_submit_exec_queue_snapshot *ge; /** @hwe: HW Engine snapshot array */ struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES]; /** @job: Snapshot of job state */ struct xe_sched_job_snapshot *job; + /** + * @matched_node: The matched capture node for timedout job + * this single-node tracker works because devcoredump will always only + * produce one hw-engine capture per devcoredump event + */ + struct __guc_capture_parsed_output *matched_node; /** @vm: Snapshot of VM state */ struct xe_vm_snapshot *vm; @@ -69,6 +80,8 @@ struct xe_devcoredump { bool captured; /** @snapshot: Snapshot is captured at time of the first crash */ struct xe_devcoredump_snapshot snapshot; + /** @job: Point to the faulting job */ + struct xe_sched_job *job; }; #endif diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index a1987b554a8d..0e2dd691bdae 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -5,10 +5,11 @@ #include "xe_device.h" +#include <linux/aperture.h> #include <linux/delay.h> +#include <linux/fault-inject.h> #include <linux/units.h> -#include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_client.h> #include <drm/drm_gem_ttm_helper.h> @@ -301,7 +302,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe_display_driver_set_hooks(&driver); - err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + err = aperture_remove_conflicting_pci_devices(pdev, driver.name); if (err) return ERR_PTR(err); @@ -373,6 +374,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, err: return ERR_PTR(err); } +ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */ + +static bool xe_driver_flr_disabled(struct xe_device *xe) +{ + return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; +} /* * The driver-initiated FLR is the highest level of reset that we can trigger @@ -387,17 +394,12 @@ err: * if/when a new instance of i915 is bound to the device it will do a full * re-init anyway. 
*/ -static void xe_driver_flr(struct xe_device *xe) +static void __xe_driver_flr(struct xe_device *xe) { const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); int ret; - if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { - drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); - return; - } - drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); /* @@ -409,25 +411,25 @@ static void xe_driver_flr(struct xe_device *xe) * is still pending (unless the HW is totally dead), but better to be * safe in case something unexpected happens */ - ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); + ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); return; } - xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); + xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); /* Trigger the actual Driver-FLR */ - xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR); + xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR); /* Wait for hardware teardown to complete */ - ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); + ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); return; } /* Wait for hardware/firmware re-init to complete */ - ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, + ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, flr_timeout, NULL, false); if (ret) { drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); @@ -435,7 +437,17 @@ static void xe_driver_flr(struct xe_device *xe) } /* Clear sticky completion status */ - xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); + xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS); +} + +static void xe_driver_flr(struct xe_device *xe) +{ + if (xe_driver_flr_disabled(xe)) { + drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); + return; + } + + __xe_driver_flr(xe); } static void xe_driver_flr_fini(void *arg) @@ -478,16 +490,15 @@ mask_err: return err; } -static bool verify_lmem_ready(struct xe_gt *gt) +static bool verify_lmem_ready(struct xe_device *xe) { - u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT; + u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; return !!val; } static int wait_for_lmem_ready(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); unsigned long timeout, start; if (!IS_DGFX(xe)) @@ -496,7 +507,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return 0; - if (verify_lmem_ready(gt)) + if (verify_lmem_ready(xe)) return 0; drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); @@ -525,13 +536,14 @@ static int wait_for_lmem_ready(struct xe_device *xe) msleep(20); - } while (!verify_lmem_ready(gt)); + } while (!verify_lmem_ready(xe)); drm_dbg(&xe->drm, "lmem ready after %ums", jiffies_to_msecs(jiffies - start)); return 0; } +ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */ static void update_device_info(struct xe_device *xe) { @@ -579,19 +591,21 @@ int xe_device_probe_early(struct xe_device *xe) return 0; } -static int xe_device_set_has_flat_ccs(struct xe_device *xe) +static int probe_has_flat_ccs(struct xe_device *xe) { + struct xe_gt *gt; + unsigned int fw_ref; u32 reg; - int err; + /* Always enabled/disabled, no runtime check to do */ 
if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs) return 0; - struct xe_gt *gt = xe_root_mmio_gt(xe); + gt = xe_root_mmio_gt(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER); xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); @@ -600,7 +614,8 @@ static int xe_device_set_has_flat_ccs(struct xe_device *xe) drm_dbg(&xe->drm, "Flat CCS has been disabled in bios, May lead to performance impact"); - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } int xe_device_probe(struct xe_device *xe) @@ -636,6 +651,13 @@ int xe_device_probe(struct xe_device *xe) err = xe_gt_init_early(gt); if (err) return err; + + /* + * Only after this point can GT-specific MMIO operations + * (including things like communication with the GuC) + * be performed. + */ + xe_gt_mmio_init(gt); } for_each_tile(tile, xe, id) { @@ -651,11 +673,9 @@ int xe_device_probe(struct xe_device *xe) err = xe_ggtt_init_early(tile->mem.ggtt); if (err) return err; - if (IS_SRIOV_VF(xe)) { - err = xe_memirq_init(&tile->sriov.vf.memirq); - if (err) - return err; - } + err = xe_memirq_init(&tile->memirq); + if (err) + return err; } for_each_gt(gt, xe, id) { @@ -679,7 +699,7 @@ int xe_device_probe(struct xe_device *xe) if (err) goto err; - err = xe_device_set_has_flat_ccs(xe); + err = probe_has_flat_ccs(xe); if (err) goto err; @@ -789,6 +809,24 @@ void xe_device_remove(struct xe_device *xe) void xe_device_shutdown(struct xe_device *xe) { + struct xe_gt *gt; + u8 id; + + drm_dbg(&xe->drm, "Shutting down device\n"); + + if (xe_driver_flr_disabled(xe)) { + xe_display_pm_shutdown(xe); + + xe_irq_suspend(xe); + + for_each_gt(gt, xe, id) + xe_gt_shutdown(gt); + + xe_display_pm_shutdown_late(xe); + } else { + /* BOOM! */ + __xe_driver_flr(xe); + } } /** @@ -802,11 +840,9 @@ void xe_device_shutdown(struct xe_device *xe) */ void xe_device_wmb(struct xe_device *xe) { - struct xe_gt *gt = xe_root_mmio_gt(xe); - wmb(); if (IS_DGFX(xe)) - xe_mmio_write32(gt, VF_CAP_REG, 0); + xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); } /** @@ -830,6 +866,7 @@ void xe_device_wmb(struct xe_device *xe) void xe_device_td_flush(struct xe_device *xe) { struct xe_gt *gt; + unsigned int fw_ref; u8 id; if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) @@ -844,10 +881,11 @@ void xe_device_td_flush(struct xe_device *xe) if (xe_gt_is_media_type(gt)) continue; - if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; - xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); + xe_mmio_write32(>->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); /* * FIXME: We can likely do better here with our choice of * timeout. Currently we just assume the worst case, i.e. 150us, @@ -855,36 +893,36 @@ void xe_device_td_flush(struct xe_device *xe) * scenario on current platforms if all cache entries are * transient and need to be flushed.. 
	 */
-		if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
 				   150, NULL, false))
 			xe_gt_err_once(gt, "TD flush timeout\n");
 
-		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	}
 }
 
 void xe_device_l2_flush(struct xe_device *xe)
 {
 	struct xe_gt *gt;
-	int err;
+	unsigned int fw_ref;
 
 	gt = xe_root_mmio_gt(xe);
 
 	if (!XE_WA(gt, 16023588340))
 		return;
 
-	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-	if (err)
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!fw_ref)
 		return;
 
 	spin_lock(&gt->global_invl_lock);
-	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
 
-	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
+	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);
 
-	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 }
 
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
@@ -919,6 +957,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
 
 	for_each_gt(gt, xe, id) {
 		drm_printf(p, "GT id: %u\n", id);
+		drm_printf(p, "\tTile: %u\n", gt->tile->id);
 		drm_printf(p, "\tType: %s\n",
 			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
 		drm_printf(p, "\tIP ver: %u.%u.%u\n",
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 34620ef855c0..f1fbfe916867 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -9,6 +9,8 @@
 #include <drm/drm_util.h>
 
 #include "xe_device_types.h"
+#include "xe_gt_types.h"
+#include "xe_sriov.h"
 
 static inline struct xe_device *to_xe_device(const struct drm_device *dev)
 {
@@ -138,7 +140,7 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
 
 static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
 {
-	return &gt->mmio.fw;
+	return &gt->pm.fw;
 }
 
 void xe_device_assert_mem_access(struct xe_device *xe);
@@ -153,11 +155,22 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
 	return xe->info.has_sriov;
 }
 
+static inline bool xe_device_has_msix(struct xe_device *xe)
+{
+	/* TODO: change this when MSI-X support is fully integrated */
+	return false;
+}
+
 static inline bool xe_device_has_memirq(struct xe_device *xe)
 {
 	return GRAPHICS_VERx100(xe) >= 1250;
 }
 
+static inline bool xe_device_uses_memirq(struct xe_device *xe)
+{
+	return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
+}
+
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
 
 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 687f3a9039bb..b9ea455d6f59 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -14,7 +14,6 @@
 
 #include "xe_devcoredump_types.h"
 #include "xe_heci_gsc.h"
-#include "xe_gt_types.h"
 #include "xe_lmtt_types.h"
 #include "xe_memirq_types.h"
 #include "xe_oa.h"
@@ -108,6 +107,45 @@ struct xe_mem_region {
 };
 
 /**
+ * struct xe_mmio - register mmio structure
+ *
+ * Represents an MMIO region that the CPU may use to access registers. A
+ * region may share its IO map with other regions (e.g., all GTs within a
+ * tile share the same map with their parent tile, but represent different
+ * subregions of the overall IO space).
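+ *
+ * A sketch of how the @adj_limit/@adj_offset pair declared below is meant
+ * to be applied when accessing a register (illustrative only; the real
+ * accessors live in xe_mmio.c):
+ *
+ *	u32 addr = reg.addr;
+ *
+ *	if (addr < mmio->adj_limit)
+ *		addr += mmio->adj_offset;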
+ */ +struct xe_mmio { + /** @tile: Backpointer to tile, used for tracing */ + struct xe_tile *tile; + + /** @regs: Map used to access registers. */ + void __iomem *regs; + + /** + * @sriov_vf_gt: Backpointer to GT. + * + * This pointer is only set for GT MMIO regions and only when running + * as an SRIOV VF structure + */ + struct xe_gt *sriov_vf_gt; + + /** + * @regs_size: Length of the register region within the map. + * + * The size of the iomap set in *regs is generally larger than the + * register mmio space since it includes unused regions and/or + * non-register regions such as the GGTT PTEs. + */ + size_t regs_size; + + /** @adj_limit: adjust MMIO address if address is below this value */ + u32 adj_limit; + + /** @adj_offset: offset to add to MMIO address when adjusting */ + u32 adj_offset; +}; + +/** * struct xe_tile - hardware tile structure * * From a driver perspective, a "tile" is effectively a complete GPU, containing @@ -148,26 +186,14 @@ struct xe_tile { * * 4MB-8MB: reserved * * 8MB-16MB: global GTT */ - struct { - /** @mmio.size: size of tile's MMIO space */ - size_t size; - - /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */ - void __iomem *regs; - } mmio; + struct xe_mmio mmio; /** * @mmio_ext: MMIO-extension info for a tile. * * Each tile has its own additional 256MB (28-bit) MMIO-extension space. */ - struct { - /** @mmio_ext.size: size of tile's additional MMIO-extension space */ - size_t size; - - /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */ - void __iomem *regs; - } mmio_ext; + struct xe_mmio mmio_ext; /** @mem: memory management info for tile */ struct { @@ -200,14 +226,14 @@ struct xe_tile { struct xe_lmtt lmtt; } pf; struct { - /** @sriov.vf.memirq: Memory Based Interrupts. */ - struct xe_memirq memirq; - /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */ struct xe_ggtt_node *ggtt_balloon[2]; } vf; } sriov; + /** @memirq: Memory Based Interrupts. 
*/ + struct xe_memirq memirq; + /** @pcode: tile's PCODE */ struct { /** @pcode.lock: protecting tile's PCODE mailbox data */ diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index fb52a23e28f8..22f0f1a6dfd5 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -278,6 +278,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) struct xe_hw_engine *hwe; struct xe_exec_queue *q; u64 gpu_timestamp; + unsigned int fw_ref; xe_pm_runtime_get(xe); @@ -303,13 +304,16 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) continue; fw = xe_hw_engine_to_fw_domain(hwe); - if (xe_force_wake_get(gt_to_fw(gt), fw)) { + + fw_ref = xe_force_wake_get(gt_to_fw(gt), fw); + if (!xe_force_wake_ref_has_domain(fw_ref, fw)) { hwe = NULL; + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } gpu_timestamp = xe_hw_engine_read_timestamp(hwe); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); break; } diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 7deb480e26af..1158b6062a6c 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -143,7 +143,7 @@ struct xe_exec_queue { /** @hw_engine_group_link: link into exec queues in the same hw engine group */ struct list_head hw_engine_group_link; /** @lrc: logical ring context for this exec queue */ - struct xe_lrc *lrc[]; + struct xe_lrc *lrc[] __counted_by(width); }; /** diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index 6a59165b9569..a8c416a48812 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -44,6 +44,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, u32 ctx_id) { struct xe_gt *gt = hwe->gt; + struct xe_mmio *mmio = >->mmio; struct xe_device *xe = gt_to_xe(gt); u64 lrc_desc; @@ -58,7 +59,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, } if (hwe->class == XE_ENGINE_CLASS_COMPUTE) - xe_mmio_write32(hwe->gt, RCU_MODE, + xe_mmio_write32(mmio, RCU_MODE, _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE)); xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail); @@ -76,17 +77,17 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, */ wmb(); - xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base), + xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base), xe_bo_ggtt_addr(hwe->hwsp)); - xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base)); - xe_mmio_write32(gt, RING_MODE(hwe->mmio_base), + xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base)); + xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base), lower_32_bits(lrc_desc)); - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base), upper_32_bits(lrc_desc)); - xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base), + xe_mmio_write32(mmio, RING_EXECLIST_CONTROL(hwe->mmio_base), EL_CTRL_LOAD); } @@ -168,8 +169,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe) struct xe_gt *gt = hwe->gt; u32 hi, lo; - lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base)); - hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base)); + lo = xe_mmio_read32(>->mmio, 
RING_EXECLIST_STATUS_LO(hwe->mmio_base)); + hi = xe_mmio_read32(>->mmio, RING_EXECLIST_STATUS_HI(hwe->mmio_base)); return lo | (u64)hi << 32; } @@ -312,7 +313,7 @@ execlist_run_job(struct drm_sched_job *drm_job) q->ring_ops->emit_job(job); xe_execlist_make_active(exl); - return dma_fence_get(job->fence); + return job->fence; } static void execlist_job_free(struct drm_sched_job *drm_job) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 7d9fc489dcb8..4f6784e5abf8 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -21,15 +21,25 @@ static const char *str_wake_sleep(bool wake) return wake ? "wake" : "sleep"; } -static void domain_init(struct xe_force_wake_domain *domain, +static void mark_domain_initialized(struct xe_force_wake *fw, + enum xe_force_wake_domain_id id) +{ + fw->initialized_domains |= BIT(id); +} + +static void init_domain(struct xe_force_wake *fw, enum xe_force_wake_domain_id id, struct xe_reg reg, struct xe_reg ack) { + struct xe_force_wake_domain *domain = &fw->domains[id]; + domain->id = id; domain->reg_ctl = reg; domain->reg_ack = ack; domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL); domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL); + + mark_domain_initialized(fw, id); } void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) @@ -43,13 +53,11 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (xe->info.graphics_verx100 >= 1270) { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT_MTL); } else { - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], - XE_FW_DOMAIN_ID_GT, + init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, FORCEWAKE_ACK_GT); } @@ -63,8 +71,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); if (!xe_gt_is_media_type(gt)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER], - XE_FW_DOMAIN_ID_RENDER, + init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, FORCEWAKE_ACK_RENDER); @@ -72,8 +79,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j, FORCEWAKE_MEDIA_VDBOX(j), FORCEWAKE_ACK_MEDIA_VDBOX(j)); } @@ -82,15 +88,13 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) if (!(gt->info.engine_mask & BIT(i))) continue; - domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j], - XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, + init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j, FORCEWAKE_MEDIA_VEBOX(j), FORCEWAKE_ACK_MEDIA_VEBOX(j)); } if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)) - domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC], - XE_FW_DOMAIN_ID_GSC, + init_domain(fw, XE_FW_DOMAIN_ID_GSC, FORCEWAKE_GSC, FORCEWAKE_ACK_GSC); } @@ -100,7 +104,7 @@ static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, if (IS_SRIOV_VF(gt_to_xe(gt))) return; - xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0)); + xe_mmio_write32(>->mmio, domain->reg_ctl, domain->mask | (wake ? 
domain->val : 0));
 }
 
 static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
@@ -111,7 +115,7 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
 	if (IS_SRIOV_VF(gt_to_xe(gt)))
 		return 0;
 
-	ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
+	ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
 			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			     &value, true);
 	if (ret)
@@ -156,52 +160,108 @@ static int domain_sleep_wait(struct xe_gt *gt,
 				 (ffs(tmp__) - 1))) && \
 		 domain__->reg_ctl.addr)
 
-int xe_force_wake_get(struct xe_force_wake *fw,
-		      enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_get() - Increase the domain refcount
+ * @fw: struct xe_force_wake
+ * @domains: forcewake domains to get refcount on
+ *
+ * This function wakes up @domains if they are asleep and takes references.
+ * If the requested domain is XE_FORCEWAKE_ALL, only the applicable/initialized
+ * domains are considered for refcounting, and it is the caller's
+ * responsibility to check whether the returned ref includes a specific domain
+ * by using xe_force_wake_ref_has_domain(). The caller must call
+ * xe_force_wake_put() to drop the references taken here.
+ *
+ * Return: opaque reference to the woken domains, or zero if none of the
+ * requested domains could be woken.
+ */
+unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
+					    enum xe_force_wake_domains domains)
 {
 	struct xe_gt *gt = fw->gt;
 	struct xe_force_wake_domain *domain;
-	enum xe_force_wake_domains tmp, woken = 0;
+	unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
+	unsigned int tmp, ref_rqst;
 	unsigned long flags;
-	int ret = 0;
 
+	xe_gt_assert(gt, is_power_of_2(domains));
+	xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
+	xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);
+
+	ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
 	spin_lock_irqsave(&fw->lock, flags);
-	for_each_fw_domain_masked(domain, domains, fw, tmp) {
+	for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
 		if (!domain->ref++) {
-			woken |= BIT(domain->id);
+			awake_rqst |= BIT(domain->id);
 			domain_wake(gt, domain);
 		}
+		ref_incr |= BIT(domain->id);
 	}
-	for_each_fw_domain_masked(domain, woken, fw, tmp) {
-		ret |= domain_wake_wait(gt, domain);
+	for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
+		if (domain_wake_wait(gt, domain) == 0) {
+			fw->awake_domains |= BIT(domain->id);
+		} else {
+			awake_failed |= BIT(domain->id);
+			--domain->ref;
+		}
 	}
-	fw->awake_domains |= woken;
+	ref_incr &= ~awake_failed;
 	spin_unlock_irqrestore(&fw->lock, flags);
 
-	return ret;
+	xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
+		   str_plural(hweight_long(awake_failed)), awake_failed);
+
+	if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
+		ref_incr |= XE_FORCEWAKE_ALL;
+
+	return ref_incr;
 }
 
-int xe_force_wake_put(struct xe_force_wake *fw,
-		      enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_put - Decrement the refcount and put domains to sleep at zero
+ * @fw: Pointer to the force wake structure
+ * @fw_ref: return value of xe_force_wake_get()
+ *
+ * This function drops the references taken for the domains in @fw_ref. If the
+ * refcount of any of those domains reaches 0, it puts the domain to sleep and
+ * waits up to 50 ms for the domain to acknowledge the sleep request. Warns if
+ * a domain fails to acknowledge within the timeout.
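+ *
+ * A typical caller pairs the two as follows (sketch, mirroring the callers
+ * converted in this patch):
+ *
+ *	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ *	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ *		xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ *		return -ETIMEDOUT;
+ *	}
+ *	...
+ *	xe_force_wake_put(gt_to_fw(gt), fw_ref);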
+ */ +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - enum xe_force_wake_domains tmp, sleep = 0; + unsigned int tmp, sleep = 0; unsigned long flags; - int ret = 0; + int ack_fail = 0; + + /* + * Avoid unnecessary lock and unlock when the function is called + * in error path of individual domains. + */ + if (!fw_ref) + return; + + if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + fw_ref = fw->initialized_domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, domains, fw, tmp) { + for_each_fw_domain_masked(domain, fw_ref, fw, tmp) { + xe_gt_assert(gt, domain->ref); + if (!--domain->ref) { sleep |= BIT(domain->id); domain_sleep(gt, domain); } } for_each_fw_domain_masked(domain, sleep, fw, tmp) { - ret |= domain_sleep_wait(gt, domain); + if (domain_sleep_wait(gt, domain) == 0) + fw->awake_domains &= ~BIT(domain->id); + else + ack_fail |= BIT(domain->id); } - fw->awake_domains &= ~sleep; spin_unlock_irqrestore(&fw->lock, flags); - return ret; + xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n", + str_plural(hweight_long(ack_fail)), ack_fail); } diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index a2577672f4e3..0e3e84bfa51c 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -15,10 +15,9 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw); void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw); -int xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); -int xe_force_wake_put(struct xe_force_wake *fw, - enum xe_force_wake_domains domains); +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains); +void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref); static inline int xe_force_wake_ref(struct xe_force_wake *fw, @@ -46,4 +45,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw, xe_gt_assert(fw->gt, fw->awake_domains & domain); } +/** + * xe_force_wake_ref_has_domain - verifies if the domains are in fw_ref + * @fw_ref : the force_wake reference + * @domain : forcewake domain to verify + * + * This function confirms whether the @fw_ref includes a reference to the + * specified @domain. + * + * Return: true if domain is refcounted. 
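+ *
+ * For example, after fw_ref = xe_force_wake_get(fw, XE_FORCEWAKE_ALL),
+ * xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT) reports whether the GT
+ * domain was actually woken and refcounted.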
+ */ +static inline bool +xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain) +{ + return fw_ref & domain; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h index ed0edc2cdf9f..899fbbcb3ea9 100644 --- a/drivers/gpu/drm/xe/xe_force_wake_types.h +++ b/drivers/gpu/drm/xe/xe_force_wake_types.h @@ -48,7 +48,7 @@ enum xe_force_wake_domains { XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2), XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3), XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC), - XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1 + XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) }; /** @@ -78,7 +78,9 @@ struct xe_force_wake { /** @lock: protects everything force wake struct */ spinlock_t lock; /** @awake_domains: mask of all domains awake */ - enum xe_force_wake_domains awake_domains; + unsigned int awake_domains; + /** @initialized_domains: mask of all initialized domains */ + unsigned int initialized_domains; /** @domains: force wake domains */ struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT]; }; diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index ff19eca5d358..558fac8bb6fb 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -5,6 +5,7 @@ #include "xe_ggtt.h" +#include <linux/fault-inject.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/sizes.h> @@ -107,8 +108,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev) static void ggtt_update_access_counter(struct xe_ggtt *ggtt) { - struct xe_gt *gt = XE_WA(ggtt->tile->primary_gt, 22019338487) ? ggtt->tile->primary_gt : - ggtt->tile->media_gt; + struct xe_tile *tile = ggtt->tile; + struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ? + tile->primary_gt : tile->media_gt; + struct xe_mmio *mmio = &affected_gt->mmio; u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63; /* * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit @@ -118,7 +121,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt) lockdep_assert_held(&ggtt->lock); if ((++ggtt->access_count % max_gtt_writes) == 0) { - xe_mmio_write32(gt, GMD_ID, 0x0); + xe_mmio_write32(mmio, GMD_ID, 0x0); ggtt->access_count = 0; } } @@ -243,7 +246,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) else ggtt->pt_ops = &xelp_pt_ops; - ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0); + ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); drm_mm_init(&ggtt->mm, xe_wopcm_size(xe), ggtt->size - xe_wopcm_size(xe)); @@ -262,6 +265,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) return 0; } +ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt); @@ -405,7 +409,7 @@ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) * vs. correct GGTT page. Not particularly a hot code path so blindly * do a mmio read here which results in GuC reading correct GGTT page. */ - xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); /* Each GT in a tile has its own TLB to cache GGTT lookups */ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); @@ -609,7 +613,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { int err; - u64 alignment = XE_PAGE_SIZE; + u64 alignment = bo->min_align > 0 ? 
bo->min_align : XE_PAGE_SIZE; if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) alignment = SZ_64K; diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 6fbea70d3d36..1eb791ddc375 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -34,6 +34,7 @@ #include "instructions/xe_gsc_commands.h" #include "regs/xe_gsc_regs.h" #include "regs/xe_gt_regs.h" +#include "regs/xe_irq_regs.h" static struct xe_gt * gsc_to_gt(struct xe_gsc *gsc) @@ -179,7 +180,7 @@ out_bo: static int gsc_fw_is_loaded(struct xe_gt *gt) { - return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) & + return xe_mmio_read32(>->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) & HECI1_FWSTS1_INIT_COMPLETE; } @@ -190,7 +191,7 @@ static int gsc_fw_wait(struct xe_gt *gt) * executed by the GSCCS. To account for possible submission delays or * other issues, we use a 500ms timeout in the wait here. */ - return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE), + return xe_mmio_wait32(>->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE), HECI1_FWSTS1_INIT_COMPLETE, HECI1_FWSTS1_INIT_COMPLETE, 500 * USEC_PER_MSEC, NULL, false); @@ -260,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_tile *tile = gt_to_tile(gt); + unsigned int fw_ref; int ret; if (XE_WA(tile->primary_gt, 14018094691)) { - ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); /* * If the forcewake fails we want to keep going, because the worst * case outcome in failing to apply the WA is that PXP won't work, - * which is not fatal. We still throw a warning so the issue is - * seen if it happens. + * which is not fatal. Forcewake get warns implicitly in case of failure */ - xe_gt_WARN_ON(tile->primary_gt, ret); - xe_gt_mcr_multicast_write(tile->primary_gt, EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK, EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT); @@ -281,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc) ret = gsc_upload(gsc); if (XE_WA(tile->primary_gt, 14018094691)) - xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref); if (ret) return ret; @@ -330,7 +329,7 @@ static int gsc_er_complete(struct xe_gt *gt) * so in that scenario we're always guaranteed to find the correct * value. 
*/ - er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE; + er_status = xe_mmio_read32(>->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE; if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) { /* @@ -351,6 +350,7 @@ static void gsc_work(struct work_struct *work) struct xe_gsc *gsc = container_of(work, typeof(*gsc), work); struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; u32 actions; int ret; @@ -360,7 +360,7 @@ static void gsc_work(struct work_struct *work) spin_unlock_irq(&gsc->lock); xe_pm_runtime_get(xe); - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); if (actions & GSC_ACTION_ER_COMPLETE) { ret = gsc_er_complete(gt); @@ -380,7 +380,7 @@ static void gsc_work(struct work_struct *work) xe_gsc_proxy_request_handler(gsc); out: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); } @@ -581,11 +581,11 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep) if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt)) return; - xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set); + xe_mmio_rmw32(>->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set); if (prep) { /* make sure the reset bit is clear when writing the CSR reg */ - xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), + xe_mmio_rmw32(>->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), HECI_H_CSR_RST, HECI_H_CSR_IG); msleep(200); } @@ -599,7 +599,8 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep) void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) { struct xe_gt *gt = gsc_to_gt(gsc); - int err; + struct xe_mmio *mmio = >->mmio; + unsigned int fw_ref; xe_uc_fw_print(&gsc->fw, p); @@ -608,17 +609,17 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) if (!xe_uc_fw_is_enabled(&gsc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) return; drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", - xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); - - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); + + xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index 2d6ea8c01445..fc64b45d324b 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -65,7 +65,7 @@ gsc_to_gt(struct xe_gsc *gsc) bool xe_gsc_proxy_init_done(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); - u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); + u32 fwsts1 = xe_mmio_read32(>->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)); return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) == HECI1_FWSTS1_PROXY_STATE_NORMAL; @@ -78,7 +78,7 @@ static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, 
u32 set) /* make sure we never accidentally write the RST bit */ clr |= HECI_H_CSR_RST; - xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); + xe_mmio_rmw32(>->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set); } static void gsc_proxy_irq_clear(struct xe_gsc *gsc) @@ -450,22 +450,21 @@ void xe_gsc_proxy_remove(struct xe_gsc *gsc) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_device *xe = gt_to_xe(gt); - int err = 0; + unsigned int fw_ref = 0; if (!gsc->proxy.component_added) return; /* disable HECI2 IRQs */ xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); + if (!fw_ref) xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n"); /* try do disable irq even if forcewake failed */ gsc_proxy_irq_toggle(gsc, false); - if (!err) - xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); xe_gsc_wait_for_worker_completion(gsc); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index d5fd6a089b7c..d6744be01a68 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -77,7 +77,8 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile) return ERR_PTR(-ENOMEM); gt->tile = tile; - gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0); + gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", + WQ_MEM_RECLAIM); err = drmm_add_action_or_reset(>_to_xe(gt)->drm, gt_fini, gt); if (err) @@ -97,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; if (!xe_gt_is_media_type(gt)) { @@ -114,13 +115,13 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) } xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { + unsigned int fw_ref; u32 reg; - int err; if (!XE_WA(gt, 16023588340)) return; @@ -128,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (WARN_ON(err)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg &= ~CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -244,7 +245,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) else if (entry->clr_bits + 1) val = (reg.mcr ? xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(gt, reg)) & (~entry->clr_bits); + xe_mmio_read32(>->mmio, reg)) & (~entry->clr_bits); else val = 0; @@ -402,11 +403,14 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) { + err = -ETIMEDOUT; goto err_hw_fence_irq; + } if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -439,16 +443,14 @@ static int gt_fw_domain_init(struct xe_gt *gt) * Stash hardware-reported version. 
Since this register does not exist * on pre-MTL platforms, reading it there will (correctly) return 0. */ - gt->info.gmdid = xe_mmio_read32(gt, GMD_ID); - - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - XE_WARN_ON(err); + gt->info.gmdid = xe_mmio_read32(>->mmio, GMD_ID); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -458,11 +460,14 @@ err_hw_fence_irq: static int all_fw_domain_init(struct xe_gt *gt) { + unsigned int fw_ref; int err, i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_hw_fence_irq; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_force_wake; + } xe_gt_mcr_set_implicit_defaults(gt); xe_reg_sr_apply_mmio(>->reg_sr, gt); @@ -526,14 +531,12 @@ static int all_fw_domain_init(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); -err_hw_fence_irq: + xe_force_wake_put(gt_to_fw(gt), fw_ref); for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(>->fence_irq[i]); @@ -546,11 +549,12 @@ err_hw_fence_irq: */ int xe_gt_init_hwconfig(struct xe_gt *gt) { + unsigned int fw_ref; int err; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; xe_gt_mcr_init_early(gt); xe_pat_init(gt); @@ -568,8 +572,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt) xe_gt_enable_host_l2_vram(gt); out_fw: - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -out: + xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } @@ -622,6 +625,30 @@ int xe_gt_init(struct xe_gt *gt) return 0; } +/** + * xe_gt_mmio_init() - Initialize GT's MMIO access + * @gt: the GT object + * + * Initialize GT's MMIO accessor, which will be used to access registers inside + * this GT. 
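+ *
+ * The expected ordering during probe, in sketch form (this is what
+ * xe_device_probe() does earlier in this patch):
+ *
+ *	for_each_gt(gt, xe, id) {
+ *		err = xe_gt_init_early(gt);
+ *		if (err)
+ *			return err;
+ *
+ *		xe_gt_mmio_init(gt);
+ *	}
+ *
+ * Only after xe_gt_mmio_init() returns may GT-specific MMIO (including
+ * GuC communication) be performed.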
+ */ +void xe_gt_mmio_init(struct xe_gt *gt) +{ + struct xe_tile *tile = gt_to_tile(gt); + + gt->mmio.regs = tile->mmio.regs; + gt->mmio.regs_size = tile->mmio.regs_size; + gt->mmio.tile = tile; + + if (gt->info.type == XE_GT_TYPE_MEDIA) { + gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET; + gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH; + } + + if (IS_SRIOV_VF(gt_to_xe(gt))) + gt->mmio.sriov_vf_gt = gt; +} + void xe_gt_record_user_engines(struct xe_gt *gt) { struct xe_hw_engine *hwe; @@ -649,8 +676,8 @@ static int do_gt_reset(struct xe_gt *gt) xe_gsc_wa_14015076503(gt, true); - xe_mmio_write32(gt, GDRST, GRDOM_FULL); - err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false); + xe_mmio_write32(>->mmio, GDRST, GRDOM_FULL); + err = xe_mmio_wait32(>->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false); if (err) xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n", ERR_PTR(err)); @@ -740,6 +767,7 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { + unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) @@ -760,9 +788,11 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) - goto err_msg; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + err = -ETIMEDOUT; + goto err_out; + } xe_uc_gucrc_disable(>->uc); xe_uc_stop_prepare(>->uc); @@ -780,8 +810,7 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -789,8 +818,7 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -err_msg: + xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(>->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -810,7 +838,7 @@ static void gt_reset_worker(struct work_struct *w) void xe_gt_reset_async(struct xe_gt *gt) { - xe_gt_info(gt, "trying reset\n"); + xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0)); /* Don't do a reset while one is already in flight */ if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(>->uc)) @@ -822,22 +850,25 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_stop_prepare(>->uc); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } int xe_gt_suspend(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = xe_uc_suspend(>->uc); @@ -848,19 +879,29 @@ int xe_gt_suspend(struct xe_gt *gt) xe_gt_disable_host_l2_vram(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "suspended\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "suspend failed (%pe)\n", 
ERR_PTR(err)); return err; } +void xe_gt_shutdown(struct xe_gt *gt) +{ + unsigned int fw_ref; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + do_gt_reset(gt); + xe_force_wake_put(gt_to_fw(gt), fw_ref); +} + /** * xe_gt_sanitize_freq() - Restore saved frequencies if necessary. * @gt: the GT object @@ -883,11 +924,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { + unsigned int fw_ref; int err; xe_gt_dbg(gt, "resuming\n"); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_msg; err = do_gt_restart(gt); @@ -896,14 +938,15 @@ int xe_gt_resume(struct xe_gt *gt) xe_gt_idle_enable_pg(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "resumed\n"); return 0; -err_force_wake: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); err_msg: + err = -ETIMEDOUT; +err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index ee138e9768a2..82b9b7f82fca 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -31,6 +31,7 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile); int xe_gt_init_hwconfig(struct xe_gt *gt); int xe_gt_init_early(struct xe_gt *gt); int xe_gt_init(struct xe_gt *gt); +void xe_gt_mmio_init(struct xe_gt *gt); void xe_gt_declare_wedged(struct xe_gt *gt); int xe_gt_record_default_lrcs(struct xe_gt *gt); @@ -48,6 +49,7 @@ void xe_gt_record_user_engines(struct xe_gt *gt); void xe_gt_suspend_prepare(struct xe_gt *gt); int xe_gt_suspend(struct xe_gt *gt); +void xe_gt_shutdown(struct xe_gt *gt); int xe_gt_resume(struct xe_gt *gt); void xe_gt_reset_async(struct xe_gt *gt); void xe_gt_sanitize(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index ffcbd05671fc..b6adfb9f2030 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -74,7 +74,7 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) * platforms as these bits are unused there. 
*/ mode |= CCS_MODE_CSLICE_0_3_MASK << 16; - xe_mmio_write32(gt, CCS_MODE, mode); + xe_mmio_write32(>->mmio, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", mode, config, num_engines, num_slices); diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index 86c2d62b4bdc..cc2ae159298e 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -17,7 +17,7 @@ static u32 read_reference_ts_freq(struct xe_gt *gt) { - u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE); + u32 ts_override = xe_mmio_read32(>->mmio, TIMESTAMP_OVERRIDE); u32 base_freq, frac_freq; base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK, @@ -57,7 +57,7 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg) int xe_gt_clock_init(struct xe_gt *gt) { - u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE); + u32 ctc_reg = xe_mmio_read32(>->mmio, CTC_MODE); u32 freq = 0; /* Assuming gen11+ so assert this assumption is correct */ @@ -66,7 +66,7 @@ int xe_gt_clock_init(struct xe_gt *gt) if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) { freq = read_reference_ts_freq(gt); } else { - u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0); + u32 c0 = xe_mmio_read32(>->mmio, RPM_CONFIG0); freq = get_crystal_clock_freq(c0); diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 8f95d3a5949b..3e8c351a0eab 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -15,6 +15,7 @@ #include "xe_ggtt.h" #include "xe_gt.h" #include "xe_gt_mcr.h" +#include "xe_gt_idle.h" #include "xe_gt_sriov_pf_debugfs.h" #include "xe_gt_sriov_vf_debugfs.h" #include "xe_gt_stats.h" @@ -89,26 +90,36 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) struct xe_device *xe = gt_to_xe(gt); struct xe_hw_engine *hwe; enum xe_hw_engine_id id; - int err; + unsigned int fw_ref; xe_pm_runtime_get(xe); - err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (err) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { xe_pm_runtime_put(xe); - return err; + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; } for_each_hw_engine(hwe, gt, id) xe_hw_engine_print(hwe, p); - err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); - if (err) - return err; return 0; } +static int powergate_info(struct xe_gt *gt, struct drm_printer *p) +{ + int ret; + + xe_pm_runtime_get(gt_to_xe(gt)); + ret = xe_gt_idle_pg_print(gt, p); + xe_pm_runtime_put(gt_to_xe(gt)); + + return ret; +} + static int force_reset(struct xe_gt *gt, struct drm_printer *p) { xe_pm_runtime_get(gt_to_xe(gt)); @@ -288,6 +299,7 @@ static const struct drm_info_list debugfs_list[] = { {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, + {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info}, {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds}, {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index ab76973f3e1e..6bd39b2c5003 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -11,9 +11,9 @@ #include 
<drm/drm_managed.h> #include <drm/drm_print.h> -#include "xe_device_types.h" #include "xe_gt_sysfs.h" #include "xe_gt_throttle.h" +#include "xe_gt_types.h" #include "xe_guc_pc.h" #include "xe_pm.h" diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 67aba4140510..fd80afeef56a 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -98,7 +98,10 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency) void xe_gt_idle_enable_pg(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); - u32 pg_enable; + struct xe_gt_idle *gtidle = >->gtidle; + struct xe_mmio *mmio = >->mmio; + u32 vcs_mask, vecs_mask; + unsigned int fw_ref; int i, j; if (IS_SRIOV_VF(xe)) @@ -110,39 +113,136 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) xe_device_assert_mem_access(gt_to_xe(gt)); - pg_enable = RENDER_POWERGATE_ENABLE | MEDIA_POWERGATE_ENABLE; + vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE); + vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); + + if (vcs_mask || vecs_mask) + gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE; + + if (!xe_gt_is_media_type(gt)) + gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE; for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) { if ((gt->info.engine_mask & BIT(i))) - pg_enable |= (VDN_HCP_POWERGATE_ENABLE(j) | - VDN_MFXVDENC_POWERGATE_ENABLE(j)); + gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) | + VDN_MFXVDENC_POWERGATE_ENABLE(j)); } - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (xe->info.skip_guc_pc) { /* * GuC sets the hysteresis value when GuC PC is enabled * else set it to 25 (25 * 1.28us) */ - xe_mmio_write32(gt, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25); - xe_mmio_write32(gt, RENDER_POWERGATE_IDLE_HYSTERESIS, 25); + xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25); + xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25); } - xe_mmio_write32(gt, POWERGATE_ENABLE, pg_enable); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } void xe_gt_idle_disable_pg(struct xe_gt *gt) { + struct xe_gt_idle *gtidle = >->gtidle; + unsigned int fw_ref; + if (IS_SRIOV_VF(gt_to_xe(gt))) return; xe_device_assert_mem_access(gt_to_xe(gt)); - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + gtidle->powergate_enable = 0; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + xe_mmio_write32(>->mmio, POWERGATE_ENABLE, gtidle->powergate_enable); + xe_force_wake_put(gt_to_fw(gt), fw_ref); +} + +/** + * xe_gt_idle_pg_print - Xe powergating info + * @gt: GT object + * @p: drm_printer. 
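+ *
+ * The output is exposed through the per-GT "powergate_info" debugfs node
+ * added by this patch; the exact path is illustrative:
+ *
+ *	# cat /sys/kernel/debug/dri/<minor>/gt<N>/powergate_info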
+ * + * This function prints the powergating information + * + * Return: 0 on success, negative error code otherwise + */ +int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) +{ + struct xe_gt_idle *gtidle = &gt->gtidle; + struct xe_device *xe = gt_to_xe(gt); + enum xe_gt_idle_state state; + u32 pg_enabled, pg_status = 0; + u32 vcs_mask, vecs_mask; + unsigned int fw_ref; + int n; + /* + * Media Slices + * + * Slice 0: VCS0, VCS1, VECS0 + * Slice 1: VCS2, VCS3, VECS1 + * Slice 2: VCS4, VCS5, VECS2 + * Slice 3: VCS6, VCS7, VECS3 + */ + static const struct { + u64 engines; + u32 status_bit; + } media_slices[] = { + {(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) | + BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS}, + + {(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) | + BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS}, - xe_mmio_write32(gt, POWERGATE_ENABLE, 0); + {(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) | + BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS}, - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT)); + {(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) | + BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS}, + }; + + if (xe->info.platform == XE_PVC) { + drm_printf(p, "Power Gating not supported\n"); + return 0; + } + + state = gtidle->idle_status(gtidle_to_pc(gtidle)); + pg_enabled = gtidle->powergate_enable; + + /* Do not wake the GT to read powergating status */ + if (state != GT_IDLE_C6) { + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; + + pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE); + pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS); + + xe_force_wake_put(gt_to_fw(gt), fw_ref); + } + + if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) { + drm_printf(p, "Render Power Gating Enabled: %s\n", + str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE)); + + drm_printf(p, "Render Power Gate Status: %s\n", + str_up_down(pg_status & RENDER_AWAKE_STATUS)); + } + + vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE); + vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE); + + /* Print media CPG status only if media is present */ + if (vcs_mask || vecs_mask) { + drm_printf(p, "Media Power Gating Enabled: %s\n", + str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE)); + + for (n = 0; n < ARRAY_SIZE(media_slices); n++) + if (gt->info.engine_mask & media_slices[n].engines) + drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n, + str_up_down(pg_status & media_slices[n].status_bit)); + } + return 0; } static ssize_t name_show(struct device *dev, @@ -201,13 +301,14 @@ static void gt_idle_fini(void *arg) { struct kobject *kobj = arg; struct xe_gt *gt = kobj_to_gt(kobj->parent); + unsigned int fw_ref; xe_gt_idle_disable_pg(gt); if (gt_to_xe(gt)->info.skip_guc_pc) { - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); xe_gt_idle_disable_c6(gt); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } sysfs_remove_files(kobj, gt_idle_attrs); @@ -260,9 +361,9 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt) return; /* Units of 1280 ns for a total of 5s */ - xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA); + xe_mmio_write32(&gt->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA); /* Enable RC6 */ - xe_mmio_write32(gt, RC_CONTROL, + xe_mmio_write32(&gt->mmio, RC_CONTROL, RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE); } @@ -274,6 +375,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt) { if
(IS_SRIOV_VF(gt_to_xe(gt))) return; - xe_mmio_write32(gt, RC_CONTROL, 0); - xe_mmio_write32(gt, RC_STATE, 0); + xe_mmio_write32(&gt->mmio, RC_CONTROL, 0); + xe_mmio_write32(&gt->mmio, RC_STATE, 0); } diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h index 554447b5d46d..4455a6501cb0 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.h +++ b/drivers/gpu/drm/xe/xe_gt_idle.h @@ -8,6 +8,7 @@ #include "xe_gt_idle_types.h" +struct drm_printer; struct xe_gt; int xe_gt_idle_init(struct xe_gt_idle *gtidle); @@ -15,5 +16,6 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt); void xe_gt_idle_disable_c6(struct xe_gt *gt); void xe_gt_idle_enable_pg(struct xe_gt *gt); void xe_gt_idle_disable_pg(struct xe_gt *gt); +int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p); #endif /* _XE_GT_IDLE_H_ */ diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h index f99b447534f3..b8b297a3f884 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle_types.h +++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h @@ -23,6 +23,8 @@ enum xe_gt_idle_state { struct xe_gt_idle { /** @name: name */ char name[16]; + /** @powergate_enable: copy of powergate enable bits */ + u32 powergate_enable; /** @residency_multiplier: residency multiplier in ns */ u32 residency_multiplier; /** @cur_residency: raw driver copy of idle residency */ diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index c834f64b0178..5013d674e17d 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -237,13 +237,26 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = { {}, }; +static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = { + { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */ + { 0x384900, 0x384AFF }, /* GAM */ + { 0x389560, 0x3895FF }, /* MEDIAINF */ + { 0x38B600, 0x38B8FF }, /* L3BANK */ + { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */ + { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */ + { 0x393C00, 0x393C7F }, /* MEDIAINF */ + {}, +}; + static void init_steering_l3bank(struct xe_gt *gt) { + struct xe_mmio *mmio = &gt->mmio; + if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK, - xe_mmio_read32(gt, XEHP_FUSE4)); + xe_mmio_read32(mmio, XEHP_FUSE4)); /* * Group selects mslice, instance selects bank within mslice. @@ -254,7 +267,7 @@ static void init_steering_l3bank(struct xe_gt *gt) bank_mask & BIT(0) ?
0 : 2; } else if (gt_to_xe(gt)->info.platform == XE_DG2) { u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(mmio, MIRROR_FUSE3)); u32 bank = __ffs(mslice_mask) * 8; /* @@ -266,7 +279,7 @@ static void init_steering_l3bank(struct xe_gt *gt) gt->steering[L3BANK].instance_target = bank & 0x3; } else { u32 fuse = REG_FIELD_GET(L3BANK_MASK, - ~xe_mmio_read32(gt, MIRROR_FUSE3)); + ~xe_mmio_read32(mmio, MIRROR_FUSE3)); gt->steering[L3BANK].group_target = 0; /* unused */ gt->steering[L3BANK].instance_target = __ffs(fuse); @@ -276,7 +289,7 @@ static void init_steering_mslice(struct xe_gt *gt) { u32 mask = REG_FIELD_GET(MEML3_EN_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(&gt->mmio, MIRROR_FUSE3)); /* * mslice registers are valid (not terminated) if either the meml3 @@ -352,6 +365,19 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, *instance = dss % gt->steering_dss_per_grp; } +/** + * xe_gt_mcr_steering_info_to_dss_id - Get DSS ID from group/instance steering + * @gt: GT structure + * @group: steering group ID + * @instance: steering instance ID + * + * Return: the converted DSS id. + */ +u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance) +{ + return group * dss_per_group(gt) + instance; +} + static void init_steering_dss(struct xe_gt *gt) { gt->steering_dss_per_grp = dss_per_group(gt); @@ -380,7 +406,7 @@ static void init_steering_oaddrm(struct xe_gt *gt) static void init_steering_sqidi_psmi(struct xe_gt *gt) { u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, - xe_mmio_read32(gt, MIRROR_FUSE3)); + xe_mmio_read32(&gt->mmio, MIRROR_FUSE3)); u32 select = __ffs(mask); gt->steering[SQIDI_PSMI].group_target = select >> 1; @@ -439,7 +465,10 @@ void xe_gt_mcr_init(struct xe_gt *gt) if (gt->info.type == XE_GT_TYPE_MEDIA) { drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13); - if (MEDIA_VERx100(xe) >= 1301) { + if (MEDIA_VER(xe) >= 30) { + gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table; + gt->steering[INSTANCE0].ranges = xe3lpm_instance0_steering_table; + } else if (MEDIA_VERx100(xe) >= 1301) { gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table; gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table; } else { @@ -494,8 +523,8 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt) u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) | REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2); - xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val); - xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val); + xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val); + xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val); /* * For GAM registers, all reads should be directed to instance 1 * (unicast reads against other instances are not allowed), @@ -533,7 +562,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, continue; for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) { - if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) { + if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) { *group = gt->steering[type].group_target; *instance = gt->steering[type].instance_target; return true; @@ -544,7 +573,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges; if (implicit_ranges) for (int i = 0; implicit_ranges[i].end > 0; i++) - if (xe_mmio_in_range(gt, &implicit_ranges[i], reg)) + if (xe_mmio_in_range(&gt->mmio, &implicit_ranges[i],
reg)) return false; /* @@ -579,7 +608,7 @@ static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock) * when a read to the relevant register returns 1. */ if (GRAPHICS_VERx100(xe) >= 1270) - ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL, + ret = xe_mmio_wait32(&gt->mmio, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL, true); drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT); @@ -589,7 +618,7 @@ static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock) { /* Release hardware semaphore - this is done by writing 1 to the register */ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) - xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1); + xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, 0x1); spin_unlock(&gt->mcr_lock); } @@ -603,6 +632,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, u8 rw_flag, int group, int instance, u32 value) { const struct xe_reg reg = to_xe_reg(reg_mcr); + struct xe_mmio *mmio = &gt->mmio; struct xe_reg steer_reg; u32 steer_val, val = 0; @@ -635,12 +665,12 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, if (rw_flag == MCR_OP_READ) steer_val |= MCR_MULTICAST; - xe_mmio_write32(gt, steer_reg, steer_val); + xe_mmio_write32(mmio, steer_reg, steer_val); if (rw_flag == MCR_OP_READ) - val = xe_mmio_read32(gt, reg); + val = xe_mmio_read32(mmio, reg); else - xe_mmio_write32(gt, reg, value); + xe_mmio_write32(mmio, reg, value); /* * If we turned off the multicast bit (during a write) we're required * to touch the steering register. * operation. */ if (rw_flag == MCR_OP_WRITE) - xe_mmio_write32(gt, steer_reg, MCR_MULTICAST); + xe_mmio_write32(mmio, steer_reg, MCR_MULTICAST); return val; } @@ -684,7 +714,7 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr) group, instance, 0); mcr_unlock(gt); } else { - val = xe_mmio_read32(gt, reg); + val = xe_mmio_read32(&gt->mmio, reg); } return val; @@ -757,7 +787,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr, * to touch the steering register. */ mcr_lock(gt); - xe_mmio_write32(gt, reg, value); + xe_mmio_write32(&gt->mmio, reg, value); mcr_unlock(gt); } diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h index 8d119a0d5493..c0cd36021c24 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.h +++ b/drivers/gpu/drm/xe/xe_gt_mcr.h @@ -28,6 +28,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg, void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p); void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance); +u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance); /* * Loop over each DSS and determine the group and instance IDs that diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h index d6228baaff1e..5dc71394372d 100644 --- a/drivers/gpu/drm/xe/xe_gt_printk.h +++ b/drivers/gpu/drm/xe/xe_gt_printk.h @@ -8,7 +8,7 @@ #include <drm/drm_print.h> -#include "xe_device_types.h" +#include "xe_gt_types.h" #define xe_gt_printk(_gt, _level, _fmt, ...)
\ drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index 905f409db74b..e71fc3d2bda2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -5,12 +5,15 @@ #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" #include "regs/xe_regs.h" +#include "xe_gt.h" #include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_service.h" #include "xe_mmio.h" @@ -72,7 +75,7 @@ static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe) static void pf_enable_ggtt_guest_update(struct xe_gt *gt) { - xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN); + xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN); } /** @@ -87,6 +90,57 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) pf_enable_ggtt_guest_update(gt); xe_gt_sriov_pf_service_update(gt); + xe_gt_sriov_pf_migration_init(gt); +} + +static u32 pf_get_vf_regs_stride(struct xe_device *xe) +{ + return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000; +} + +static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride) +{ + struct xe_reg pf_reg = vf_reg; + + pf_reg.vf = 0; + pf_reg.addr += stride * vfid; + + return pf_reg; +} + +static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid) +{ + u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt)); + struct xe_reg scratch; + int n, count; + + if (xe_gt_is_media_type(gt)) { + count = MED_VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(&gt->mmio, scratch, 0); + } + } else { + count = VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(&gt->mmio, scratch, 0); + } + } +} + +/** + * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function can only be called on PF. + */ +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + pf_clear_vf_scratch_regs(gt, vfid); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f0cb726a6919..96fab779a906 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -11,6 +11,7 @@ struct xe_gt; #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_init_early(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index afdb477ecf83..192643d63d22 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -34,6 +34,8 @@ #include "xe_ttm_vram_mgr.h" #include "xe_wopcm.h" +#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo))) + /* * Return: number of KLVs that were successfully parsed and saved, * negative error code on failure.
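The provisioning blobs that the config save/restore hunks below produce and consume are flat streams of GuC KLVs: a one-dword header carrying a key and a length, followed by that many value dwords. A minimal, self-contained userspace-style sketch of walking such a stream, mirroring the loop in pf_restore_vf_config(); the 16/16 key/length split in the header dword is stated here only for illustration and should be taken from the GuC KLV ABI headers in real code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define KLV_HDR_DWORDS 1u /* one header dword per KLV, as GUC_KLV_LEN_MIN */

/* Assumed header layout: key in bits 31:16, length (in dwords) in bits 15:0. */
static uint32_t klv_key(uint32_t hdr) { return hdr >> 16; }
static uint32_t klv_len(uint32_t hdr) { return hdr & 0xffff; }

/* Walk a KLV stream the same way pf_restore_vf_config() does. */
static int parse_klvs(const uint32_t *klvs, size_t num_dwords)
{
	while (num_dwords >= KLV_HDR_DWORDS) {
		uint32_t key = klv_key(klvs[0]);
		uint32_t len = klv_len(klvs[0]);

		klvs += KLV_HDR_DWORDS;
		num_dwords -= KLV_HDR_DWORDS;

		if (num_dwords < len)
			return -1; /* truncated stream, the -EBADMSG case */

		printf("key %#x, %u value dword(s)\n", (unsigned)key, (unsigned)len);

		klvs += len;
		num_dwords -= len;
	}
	return 0;
}

int main(void)
{
	/* hypothetical stream: one 1-dword KLV and one 2-dword (u64) KLV */
	const uint32_t blob[] = { 0x00010001, 8, 0x00020002, 0x0, 0x1 };

	return parse_klvs(blob, sizeof(blob) / sizeof(blob[0])) ? 1 : 0;
}

Note that a 64-bit value such as VF_CFG_GGTT_SIZE travels as two dwords with the low dword first, which is exactly what make_u64_from_u32(value[1], value[0]) reassembles in the restore path.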
@@ -229,14 +231,16 @@ } /* Return: number of configuration dwords written */ -static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config) +static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) { u32 n = 0; if (xe_ggtt_node_allocated(config->ggtt_region)) { - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); - cfg[n++] = lower_32_bits(config->ggtt_region->base.start); - cfg[n++] = upper_32_bits(config->ggtt_region->base.start); + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); + cfg[n++] = lower_32_bits(config->ggtt_region->base.start); + cfg[n++] = upper_32_bits(config->ggtt_region->base.start); + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE); cfg[n++] = lower_32_bits(config->ggtt_region->base.size); @@ -247,20 +251,24 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config) } /* Return: number of configuration dwords written */ -static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config) +static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) { u32 n = 0; - n += encode_config_ggtt(cfg, config); + n += encode_config_ggtt(cfg, config, details); - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID); - cfg[n++] = config->begin_ctx; + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID); + cfg[n++] = config->begin_ctx; + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS); cfg[n++] = config->num_ctxs; - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID); - cfg[n++] = config->begin_db; + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID); + cfg[n++] = config->begin_db; + } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS); cfg[n++] = config->num_dbs; @@ -301,7 +309,7 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) if (!cfg) return -ENOMEM; - num_dwords = encode_config(cfg, config); + num_dwords = encode_config(cfg, config, true); xe_gt_assert(gt, num_dwords <= max_cfg_dwords); if (xe_gt_is_media_type(gt)) { @@ -309,10 +317,10 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid); /* media-GT will never include a GGTT config */ - xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config)); + xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true)); /* the GGTT config must be taken from the primary-GT instead */ - num_dwords += encode_config_ggtt(cfg + num_dwords, other); + num_dwords += encode_config_ggtt(cfg + num_dwords, other, true); } xe_gt_assert(gt, num_dwords <= max_cfg_dwords); @@ -2044,7 +2052,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) valid_all = valid_all && valid_lmem; } - return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA; + return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA; } /** @@ -2071,6 +2079,174 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid) } /** + * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be PF) + * @buf: the buffer to save a config to (or NULL to query the buf size) + * @size: the size of the buffer (or 0 to query the buf size) + * + * This function can only be called on PF. + * + * Return: minimum size of the buffer or the number of bytes saved, + * or a negative error code on failure.
+ */ +ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size) +{ + struct xe_gt_sriov_config *config; + ssize_t ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + xe_gt_assert(gt, !(!buf ^ !size)); + + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + ret = pf_validate_vf_config(gt, vfid); + if (!size) { + ret = ret ? 0 : SZ_4K; + } else if (!ret) { + if (size < SZ_4K) { + ret = -ENOBUFS; + } else { + config = pf_pick_vf_config(gt, vfid); + ret = encode_config(buf, config, false) * sizeof(u32); + } + } + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + return ret; +} + +static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid, + u32 key, u32 len, const u32 *value) +{ + switch (key) { + case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY: + if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN) + return -EBADMSG; + return pf_provision_vf_ctxs(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY: + if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN) + return -EBADMSG; + return pf_provision_vf_dbs(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY: + if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN) + return -EBADMSG; + return pf_provision_exec_quantum(gt, vfid, value[0]); + + case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY: + if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN) + return -EBADMSG; + return pf_provision_preempt_timeout(gt, vfid, value[0]); + + /* auto-generate case statements */ +#define define_threshold_key_to_provision_case(TAG, ...) \ + case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \ + BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \ + if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \ + return -EBADMSG; \ + return pf_provision_threshold(gt, vfid, \ + MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \ + value[0]); + + MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case) +#undef define_threshold_key_to_provision_case + } + + if (xe_gt_is_media_type(gt)) + return -EKEYREJECTED; + + switch (key) { + case GUC_KLV_VF_CFG_GGTT_SIZE_KEY: + if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN) + return -EBADMSG; + return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0])); + + case GUC_KLV_VF_CFG_LMEM_SIZE_KEY: + if (!IS_DGFX(gt_to_xe(gt))) + return -EKEYREJECTED; + if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN) + return -EBADMSG; + return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0])); + } + + return -EKEYREJECTED; +} + +static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid, + const u32 *klvs, size_t num_dwords) +{ + int err; + + while (num_dwords >= GUC_KLV_LEN_MIN) { + u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]); + u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]); + + klvs += GUC_KLV_LEN_MIN; + num_dwords -= GUC_KLV_LEN_MIN; + + if (num_dwords < len) + err = -EBADMSG; + else + err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs); + + if (err) { + xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err)); + return err; + } + + klvs += len; + num_dwords -= len; + } + + return pf_validate_vf_config(gt, vfid); +} + +/** + * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob. + * @gt: the &xe_gt + * @vfid: the VF identifier (can't be PF) + * @buf: the buffer with config data + * @size: the size of the config data + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size) +{ + int err; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid); + + if (!size) + return -ENODATA; + + if (size % sizeof(u32)) + return -EINVAL; + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + struct drm_printer p = xe_gt_info_printer(gt); + + drm_printf(&p, "restoring VF%u config:\n", vfid); + xe_guc_klv_print(buf, size / sizeof(u32), &p); + } + + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + err = pf_send_vf_cfg_reset(gt, vfid); + if (!err) { + pf_release_vf_config(gt, vfid); + err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32)); + } + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + return err; +} + +/** * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset. * @gt: the &xe_gt * @@ -2203,6 +2379,41 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p) } /** + * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations. + * @gt: the &xe_gt + * @p: the &drm_printer + * + * Print LMEM allocations across all VFs. + * VFs without LMEM allocation are skipped. + * + * This function can only be called on PF. + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p) +{ + unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); + const struct xe_gt_sriov_config *config; + char buf[10]; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + + for (n = 1; n <= total_vfs; n++) { + config = &gt->sriov.pf.vfs[n].config; + if (!config->lmem_obj) + continue; + + string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2, + buf, sizeof(buf)); + drm_printf(p, "VF%u:\t%zu\t(%s)\n", + n, config->lmem_obj->size, buf); + } + + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + return 0; +} + +/** * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
* @gt: the &xe_gt * @p: the &drm_printer diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h index 42e64769f666..0c55aa40a1a7 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h @@ -54,6 +54,10 @@ int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long tim int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force); int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh); +ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size); +int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, + const void *buf, size_t size); + bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_config_restart(struct xe_gt *gt); @@ -61,6 +65,7 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt); int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p); +int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 02f7328bd6ce..1f50aec3a059 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -9,9 +9,11 @@ #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_sriov_pf_service.h" #include "xe_gt_sriov_printk.h" @@ -176,6 +178,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) CASE2STR(PAUSE_SEND_PAUSE); CASE2STR(PAUSE_WAIT_GUC); CASE2STR(PAUSE_GUC_DONE); + CASE2STR(PAUSE_SAVE_GUC); CASE2STR(PAUSE_FAILED); CASE2STR(PAUSED); CASE2STR(RESUME_WIP); @@ -415,6 +418,10 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid) * : | : / * : v : / * : PAUSE_GUC_DONE o-----restart + * : | : + * : | o---<--busy : + * : v / / : + * : PAUSE_SAVE_GUC : * : / : * : / : * :....o..............o...............o...........: @@ -434,6 +441,7 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid) pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE); + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC); } } @@ -464,12 +472,41 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid) pf_enter_vf_pause_failed(gt, vfid); } +static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) + pf_enter_vf_state_machine_bug(gt, vfid); +} + +static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC)) + return false; + + err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid); + if (err) { + /* retry if busy */ + if (err == -EBUSY) { + pf_enter_vf_pause_save_guc(gt, vfid); + return true; + } + /* give up on error */ + 
if (err == -EIO) + pf_enter_vf_mismatch(gt, vfid); + } + + pf_enter_vf_pause_completed(gt, vfid); + return true; +} + static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid) { if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE)) return false; - pf_enter_vf_pause_completed(gt, vfid); + pf_enter_vf_pause_save_guc(gt, vfid); return true; } @@ -1008,7 +1045,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid) if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO)) return false; - /* XXX: placeholder */ + xe_gt_sriov_pf_sanitize_hw(gt, vfid); pf_enter_vf_flr_send_finish(gt, vfid); return true; @@ -1338,6 +1375,9 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid) if (pf_exit_vf_pause_guc_done(gt, vfid)) return true; + if (pf_exit_vf_pause_save_guc(gt, vfid)) + return true; + if (pf_exit_vf_resume_send_resume(gt, vfid)) return true; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index 11830aafea45..f02f941b4ad2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -27,6 +27,7 @@ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command. * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC. * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC. + * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state. * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed. * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused. * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates the a VF resume operation is in progress. 
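The guc_state and config_blob debugfs files introduced in the hunks below expose the snapshot plumbing to userspace on kernels built with CONFIG_DRM_XE_DEBUG. A hedged sketch of a save/restore round trip over config_blob; the DRI card index "0" and "vf1" are illustrative assumptions, and the 4 KiB buffer matches the SZ_4K minimum that xe_gt_sriov_pf_config_save() reports:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path follows the debugfs layout documented in this patch; indices are assumed */
	const char *path = "/sys/kernel/debug/dri/0/gt0/vf1/config_blob";
	char blob[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open for save");
		return 1;
	}
	n = read(fd, blob, sizeof(blob)); /* save: a KLV stream of sizes/quotas */
	close(fd);
	if (n <= 0)
		return 1;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open for restore");
		return 1;
	}
	/* restore: writing the blob back replays it through pf_restore_vf_config() */
	if (write(fd, blob, n) != n)
		perror("restore");
	close(fd);
	return 0;
}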
@@ -56,6 +57,7 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE, + XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC, XE_GT_SRIOV_STATE_PAUSE_FAILED, XE_GT_SRIOV_STATE_PAUSED, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 2290ddaf9594..05df4ab3514b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -17,6 +17,7 @@ #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_debugfs.h" #include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_sriov_pf_policy.h" #include "xe_gt_sriov_pf_service.h" @@ -81,6 +82,11 @@ static const struct drm_info_list pf_info[] = { .data = xe_gt_sriov_pf_config_print_dbs, }, { + "lmem_provisioned", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_lmem, + }, + { "runtime_registers", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_service_print_runtime, @@ -312,6 +318,9 @@ static const struct { { "stop", xe_gt_sriov_pf_control_stop_vf }, { "pause", xe_gt_sriov_pf_control_pause_vf }, { "resume", xe_gt_sriov_pf_control_resume_vf }, +#ifdef CONFIG_DRM_XE_DEBUG_SRIOV + { "restore!", xe_gt_sriov_pf_migration_restore_guc_state }, +#endif }; static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) @@ -375,6 +384,119 @@ static const struct file_operations control_ops = { .llseek = default_llseek, }; +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │  ├── vf1 + * │  │  ├── guc_state + */ +static ssize_t guc_state_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + + return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos); +} + +static ssize_t guc_state_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + + if (*pos) + return -EINVAL; + + return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count); +} + +static const struct file_operations guc_state_ops = { + .owner = THIS_MODULE, + .read = guc_state_read, + .write = guc_state_write, + .llseek = default_llseek, +}; + +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │  ├── vf1 + * │  │  ├── config_blob + */ +static ssize_t config_blob_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); + unsigned int vfid = extract_vfid(parent); + ssize_t ret; + void *tmp; + + ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0); + if (!ret) + return -ENODATA; + if (ret < 0) + return ret; + + tmp = kzalloc(ret, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret); + if (ret > 0) + ret = simple_read_from_buffer(buf, count, pos, tmp, ret); + + kfree(tmp); + return ret; +} + +static ssize_t config_blob_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct dentry *dent = file_dentry(file); + struct dentry *parent = dent->d_parent; + struct xe_gt *gt = extract_gt(parent); 
+ unsigned int vfid = extract_vfid(parent); + ssize_t ret; + void *tmp; + + if (*pos) + return -EINVAL; + + if (!count) + return -ENODATA; + + if (count > SZ_4K) + return -EINVAL; + + tmp = kzalloc(count, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + if (copy_from_user(tmp, buf, count)) { + ret = -EFAULT; + } else { + ret = xe_gt_sriov_pf_config_restore(gt, vfid, tmp, count); + if (!ret) + ret = count; + } + kfree(tmp); + return ret; +} + +static const struct file_operations config_blob_ops = { + .owner = THIS_MODULE, + .read = config_blob_read, + .write = config_blob_write, + .llseek = default_llseek, +}; + /** * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs. * @gt: the &xe_gt to register @@ -423,5 +545,15 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) pf_add_config_attrs(gt, vfdentry, VFID(n)); debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops); + + /* for testing/debugging purposes only! */ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + debugfs_create_file("guc_state", + IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, + vfdentry, NULL, &guc_state_ops); + debugfs_create_file("config_blob", + IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400, + vfdentry, NULL, &config_blob_ops); + } } } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c new file mode 100644 index 000000000000..c712111aa30d --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "abi/guc_actions_sriov_abi.h" +#include "xe_bo.h" +#include "xe_gt_sriov_pf_helpers.h" +#include "xe_gt_sriov_pf_migration.h" +#include "xe_gt_sriov_printk.h" +#include "xe_guc.h" +#include "xe_guc_ct.h" +#include "xe_sriov.h" + +/* Return: number of dwords saved/restored/required or a negative error code on failure */ +static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode, + u64 addr, u32 ndwords) +{ + u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) | + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords), + }; + + return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request)); + } + +/* Return: size of the state in dwords or a negative error code on failure */ +static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0); + return ret ?: -ENODATA; +} + +/* Return: number of state dwords saved or a negative error code on failure */ +static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, + void *buff, size_t size) +{ + const int ndwords = size / sizeof(u32); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = tile_to_xe(tile); + struct xe_guc *guc = &gt->uc.guc; + struct xe_bo *bo; + int ret; + + xe_gt_assert(gt, size % sizeof(u32) == 0); + xe_gt_assert(gt, size == ndwords * sizeof(u32));
+ + bo = xe_bo_create_pin_map(xe, tile, NULL, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, + xe_bo_ggtt_addr(bo), ndwords); + if (!ret) + ret = -ENODATA; + else if (ret > ndwords) + ret = -EPROTO; + else if (ret > 0) + xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32)); + + xe_bo_unpin_map_no_vm(bo); + return ret; +} + +/* Return: number of state dwords restored or a negative error code on failure */ +static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, + const void *buff, size_t size) +{ + const int ndwords = size / sizeof(u32); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = tile_to_xe(tile); + struct xe_guc *guc = &gt->uc.guc; + struct xe_bo *bo; + int ret; + + xe_gt_assert(gt, size % sizeof(u32) == 0); + xe_gt_assert(gt, size == ndwords * sizeof(u32)); + + bo = xe_bo_create_pin_map(xe, tile, NULL, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size); + + ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, + xe_bo_ggtt_addr(bo), ndwords); + if (!ret) + ret = -ENODATA; + else if (ret > ndwords) + ret = -EPROTO; + + xe_bo_unpin_map_no_vm(bo); + return ret; +} + +static bool pf_migration_supported(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + return gt->sriov.pf.migration.supported; +} + +static struct mutex *pf_migration_mutex(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + return &gt->sriov.pf.migration.snapshot_lock; +} + +static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt, + unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + lockdep_assert_held(pf_migration_mutex(gt)); + + return &gt->sriov.pf.vfs[vfid].snapshot; +} + +static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs; +} + +static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + struct xe_device *xe = gt_to_xe(gt); + + drmm_kfree(&xe->drm, snapshot->guc.buff); + snapshot->guc.buff = NULL; + snapshot->guc.size = 0; +} + +static int pf_alloc_guc_state(struct xe_gt *gt, + struct xe_gt_sriov_state_snapshot *snapshot, + size_t size) +{ + struct xe_device *xe = gt_to_xe(gt); + void *p; + + pf_free_guc_state(gt, snapshot); + + if (!size) + return -ENODATA; + + if (size % sizeof(u32)) + return -EINVAL; + + if (size > SZ_2M) + return -EFBIG; + + p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL); + if (!p) + return -ENOMEM; + + snapshot->guc.buff = p; + snapshot->guc.size = size; + return 0; +} + +static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot) +{ + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { + unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot); + + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n", + vfid, snapshot->guc.size / sizeof(u32)); + print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET, + snapshot->guc.buff, min(SZ_64, snapshot->guc.size)); + } +} + +static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +{ +
struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + size_t size; + int ret; + + ret = pf_send_guc_query_vf_state_size(gt, vfid); + if (ret < 0) + goto fail; + size = ret * sizeof(u32); + xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size); + + ret = pf_alloc_guc_state(gt, snapshot, size); + if (ret < 0) + goto fail; + + ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size); + if (ret < 0) + goto fail; + size = ret * sizeof(u32); + xe_gt_assert(gt, size); + xe_gt_assert(gt, size <= snapshot->guc.size); + snapshot->guc.size = size; + + pf_dump_guc_state(gt, snapshot); + return 0; + +fail: + xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret)); + pf_free_guc_state(gt, snapshot); + return ret; +} + +/** + * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + int err; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + err = pf_save_vf_guc_state(gt, vfid); + mutex_unlock(pf_migration_mutex(gt)); + + return err; +} + +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid); + int ret; + + if (!snapshot->guc.size) + return -ENODATA; + + xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n", + snapshot->guc.size / sizeof(u32), vfid); + ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size); + if (ret < 0) + goto fail; + + xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid); + return 0; + +fail: + xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +/** + * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid) +{ + int ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + ret = pf_restore_vf_guc_state(gt, vfid); + mutex_unlock(pf_migration_mutex(gt)); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +/** + * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @buf: the user space buffer to read to + * @count: the maximum number of bytes to read + * @pos: the current position in the buffer + * + * This function is for PF only. + * + * This function reads up to @count bytes from the saved VF GuC state buffer + * at offset @pos into the user space address starting at @buf. + * + * Return: the number of bytes read or a negative error code on failure. 
+ */ +ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, + char __user *buf, size_t count, loff_t *pos) +{ + struct xe_gt_sriov_state_snapshot *snapshot; + ssize_t ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + snapshot = pf_pick_vf_snapshot(gt, vfid); + if (snapshot->guc.size) + ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff, + snapshot->guc.size); + else + ret = -ENODATA; + mutex_unlock(pf_migration_mutex(gt)); + + return ret; +} + +/** + * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state. + * @gt: the &xe_gt + * @vfid: the VF identifier + * @buf: the user space buffer with GuC VF state + * @size: the size of GuC VF state (in bytes) + * + * This function is for PF only. + * + * This function reads @size bytes of the VF GuC state stored at user space + * address @buf and writes it into an internal VF state buffer. + * + * Return: the number of bytes used or a negative error code on failure. + */ +ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, + const char __user *buf, size_t size) +{ + struct xe_gt_sriov_state_snapshot *snapshot; + loff_t pos = 0; + ssize_t ret; + + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + xe_gt_assert(gt, vfid != PFID); + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); + + if (!pf_migration_supported(gt)) + return -ENOPKG; + + mutex_lock(pf_migration_mutex(gt)); + snapshot = pf_pick_vf_snapshot(gt, vfid); + ret = pf_alloc_guc_state(gt, snapshot, size); + if (!ret) { + ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size); + if (ret < 0) + pf_free_guc_state(gt, snapshot); + else + pf_dump_guc_state(gt, snapshot); + } + mutex_unlock(pf_migration_mutex(gt)); + + return ret; +} +#endif /* CONFIG_DEBUG_FS */ + +static bool pf_check_migration_support(struct xe_gt *gt) +{ + /* GuC 70.25 with save/restore v2 is required */ + xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0)); + + /* XXX: for now this is for feature enabling only */ + return IS_ENABLED(CONFIG_DRM_XE_DEBUG); +} + +/** + * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration. + * @gt: the &xe_gt + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure.
+ */ +int xe_gt_sriov_pf_migration_init(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + int err; + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + + gt->sriov.pf.migration.supported = pf_check_migration_support(gt); + + if (!pf_migration_supported(gt)) + return 0; + + err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock); + if (err) + return err; + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h new file mode 100644 index 000000000000..09faeae00ddb --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GT_SRIOV_PF_MIGRATION_H_ +#define _XE_GT_SRIOV_PF_MIGRATION_H_ + +#include <linux/types.h> + +struct xe_gt; + +int xe_gt_sriov_pf_migration_init(struct xe_gt *gt); +int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid); + +#ifdef CONFIG_DEBUG_FS +ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid, + char __user *buf, size_t count, loff_t *pos); +ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid, + const char __user *buf, size_t count); +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h new file mode 100644 index 000000000000..1f3110b6d44f --- /dev/null +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ +#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_ + +#include <linux/mutex.h> +#include <linux/types.h> + +/** + * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data. + * + * Used by the PF driver to maintain per-VF migration data. + */ +struct xe_gt_sriov_state_snapshot { + /** @guc: GuC VF state snapshot */ + struct { + /** @guc.buff: buffer with the VF state */ + u32 *buff; + /** @guc.size: size of the buffer (must be dwords aligned) */ + u32 size; + } guc; +}; + +/** + * struct xe_gt_sriov_pf_migration - GT-level data. + * + * Used by the PF driver to maintain non-VF specific per-GT data.
+ */ +struct xe_gt_sriov_pf_migration { + /** @supported: indicates whether the feature is supported */ + bool supported; + + /** @snapshot_lock: protects all VFs snapshots */ + struct mutex snapshot_lock; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c index 0e23b7ea4f3e..924e75b94aec 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c @@ -237,7 +237,7 @@ static void read_many(struct xe_gt *gt, unsigned int count, const struct xe_reg *regs, u32 *values) { while (count--) - *values++ = xe_mmio_read32(gt, *regs++); + *values++ = xe_mmio_read32(&gt->mmio, *regs++); } static void pf_prepare_runtime_info(struct xe_gt *gt) @@ -402,7 +402,7 @@ static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit, for (i = 0; i < count; ++i, ++data) { addr = runtime->regs[start + i].addr; - data->offset = xe_mmio_adjusted_addr(gt, addr); + data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr); data->value = runtime->values[start + i]; } @@ -513,7 +513,7 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p for (; size--; regs++, values++) { drm_printf(p, "reg[%#x] = %#x\n", - xe_mmio_adjusted_addr(gt, regs->addr), *values); + xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values); } return 0; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index 28e1b130bf87..0426b1a77069 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -10,6 +10,7 @@ #include "xe_gt_sriov_pf_config_types.h" #include "xe_gt_sriov_pf_control_types.h" +#include "xe_gt_sriov_pf_migration_types.h" #include "xe_gt_sriov_pf_monitor_types.h" #include "xe_gt_sriov_pf_policy_types.h" #include "xe_gt_sriov_pf_service_types.h" @@ -29,6 +30,9 @@ struct xe_gt_sriov_metadata { /** @version: negotiated VF/PF ABI version */ struct xe_gt_sriov_pf_service_version version; + + /** @snapshot: snapshot of the VF state data */ + struct xe_gt_sriov_state_snapshot snapshot; }; /** @@ -36,6 +40,7 @@ struct xe_gt_sriov_metadata { * @service: service data. * @control: control data. * @policy: policy data. + * @migration: migration data. * @spare: PF-only provisioning configuration. * @vfs: metadata for all VFs.
*/ @@ -43,6 +48,7 @@ struct xe_gt_sriov_pf { struct xe_gt_sriov_pf_service service; struct xe_gt_sriov_pf_control control; struct xe_gt_sriov_pf_policy policy; + struct xe_gt_sriov_pf_migration migration; struct xe_gt_sriov_spare_config spare; struct xe_gt_sriov_metadata *vfs; }; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 4ebc82e607af..d3baba50f085 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -881,7 +881,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr) */ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) { - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr); struct vf_runtime_reg *rr; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); @@ -917,7 +917,7 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) */ void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) { - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr); xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); xe_gt_assert(gt, !reg.vf); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c index f3ddcbefc6bc..2ed5b6780d30 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c @@ -33,7 +33,7 @@ static const struct drm_info_list vf_info[] = { .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_vf_print_version, }, -#if defined(CONFIG_DRM_XE_DEBUG) || defined(CONFIG_DRM_XE_DEBUG_SRIOV) +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) || IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) { "runtime_regs", .show = xe_gt_debugfs_simple_show, diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c index 25963e33a383..03b225364101 100644 --- a/drivers/gpu/drm/xe/xe_gt_throttle.c +++ b/drivers/gpu/drm/xe/xe_gt_throttle.c @@ -41,9 +41,9 @@ u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt) xe_pm_runtime_get(gt_to_xe(gt)); if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS); + reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_PERF_LIMIT_REASONS); else - reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS); + reg = xe_mmio_read32(&gt->mmio, GT0_PERF_LIMIT_REASONS); xe_pm_runtime_put(gt_to_xe(gt)); return reg; diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 9d82ea30f4df..3cb228c773cd 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -270,6 +270,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt, int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + unsigned int fw_ref; if (xe_guc_ct_enabled(&gt->uc.guc.ct) && gt->uc.guc.submission_state.enabled) { @@ -283,20 +284,22 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) xe_gt_tlb_invalidation_fence_wait(&fence); } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) { + struct xe_mmio *mmio = &gt->mmio; + if (IS_SRIOV_VF(xe)) return 0; - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) { - xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1, + xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1, PVC_GUC_TLB_INV_DESC1_INVALIDATE); - xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0, + xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
PVC_GUC_TLB_INV_DESC0_VALID); } else { - xe_mmio_write32(gt, GUC_TLB_INV_CR, + xe_mmio_write32(mmio, GUC_TLB_INV_CR, GUC_TLB_INV_CR_INVALIDATE); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } return 0; diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 0662f71c6ede..df2042db7ee6 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -5,6 +5,7 @@ #include "xe_gt_topology.h" +#include <generated/xe_wa_oob.h> #include <linux/bitmap.h> #include <linux/compiler.h> @@ -12,6 +13,7 @@ #include "xe_assert.h" #include "xe_gt.h" #include "xe_mmio.h" +#include "xe_wa.h" static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) @@ -25,7 +27,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...) va_start(argp, numregs); for (i = 0; i < numregs; i++) - fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg)); + fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg)); va_end(argp); bitmap_from_arr32(mask, fuse_val, numregs * 32); @@ -35,7 +37,7 @@ static void load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type) { struct xe_device *xe = gt_to_xe(gt); - u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE); + u32 reg_val = xe_mmio_read32(&gt->mmio, XELP_EU_ENABLE); u32 val = 0; int i; @@ -127,7 +129,19 @@ static void load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) { struct xe_device *xe = gt_to_xe(gt); - u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3); + u32 fuse3 = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3); + + /* + * PTL platforms with media version 30.00 do not provide proper values + * for the media GT's L3 bank registers. Skip the readout since we + * don't have any way to obtain real values. + * + * This may get re-described as an official workaround in the future, + * but there's no tracking number assigned yet so we use a custom + * OOB workaround descriptor. + */ + if (XE_WA(gt, no_media_l3)) + return; if (GRAPHICS_VER(xe) >= 20) { xe_l3_bank_mask_t per_node = {}; @@ -141,7 +155,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) xe_l3_bank_mask_t per_node = {}; xe_l3_bank_mask_t per_mask_bit = {}; u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3); - u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4); + u32 fuse4 = xe_mmio_read32(&gt->mmio, XEHP_FUSE4); u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4); bitmap_set_value8(per_mask_bit, 0x3, 0); diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 3d1c51de0268..a287b98ee70b 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -6,6 +6,7 @@ #ifndef _XE_GT_TYPES_H_ #define _XE_GT_TYPES_H_ +#include "xe_device_types.h" #include "xe_force_wake_types.h" #include "xe_gt_idle_types.h" #include "xe_gt_sriov_pf_types.h" @@ -145,19 +146,20 @@ struct xe_gt { /** * @mmio: mmio info for GT. All GTs within a tile share the same * register space, but have their own copy of GSI registers at a - * specific offset, as well as their own forcewake handling. + * specific offset. + */ + struct xe_mmio mmio; + + /** + * @pm: power management info for GT. The driver uses the GT's + * "force wake" interface to wake up specific parts of the GT hardware + * from C6 sleep states and ensure the hardware remains awake while it + * is being actively used.
*/ struct { - /** @mmio.fw: force wake for GT */ + /** @pm.fw: force wake for GT */ struct xe_force_wake fw; - /** - * @mmio.adj_limit: adjust MMIO address if address is below this - * value - */ - u32 adj_limit; - /** @mmio.adj_offset: offect to add to MMIO address when adjusting */ - u32 adj_offset; - } mmio; + } pm; /** @sriov: virtualization data related to GT */ union { diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 52df28032a6f..7f704346a8f4 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -14,6 +14,7 @@ #include "regs/xe_gt_regs.h" #include "regs/xe_gtt_defs.h" #include "regs/xe_guc_regs.h" +#include "regs/xe_irq_regs.h" #include "xe_bo.h" #include "xe_device.h" #include "xe_force_wake.h" @@ -22,6 +23,7 @@ #include "xe_gt_sriov_vf.h" #include "xe_gt_throttle.h" #include "xe_guc_ads.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_guc_db_mgr.h" #include "xe_guc_hwconfig.h" @@ -68,7 +70,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc) static u32 guc_ctl_feature_flags(struct xe_guc *guc) { - u32 flags = 0; + u32 flags = GUC_CTL_ENABLE_LITE_RESTORE; if (!guc_to_xe(guc)->info.skip_guc_pc) flags |= GUC_CTL_ENABLE_SLPC; @@ -236,20 +238,21 @@ static void guc_write_params(struct xe_guc *guc) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - xe_mmio_write32(gt, SOFT_SCRATCH(0), 0); + xe_mmio_write32(>->mmio, SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]); + xe_mmio_write32(>->mmio, SOFT_SCRATCH(1 + i), guc->params[i]); } static void guc_fini_hw(void *arg) { struct xe_guc *guc = arg; struct xe_gt *gt = guc_to_gt(guc); + unsigned int fw_ref; - xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_fini_hw(&guc_to_gt(guc)->uc); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -338,6 +341,10 @@ int xe_guc_init(struct xe_guc *guc) if (ret) goto out; + ret = xe_guc_capture_init(guc); + if (ret) + goto out; + ret = xe_guc_ads_init(&guc->ads); if (ret) goto out; @@ -425,6 +432,7 @@ int xe_guc_post_load_init(struct xe_guc *guc) int xe_guc_reset(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); + struct xe_mmio *mmio = >->mmio; u32 guc_status, gdrst; int ret; @@ -433,15 +441,15 @@ int xe_guc_reset(struct xe_guc *guc) if (IS_SRIOV_VF(gt_to_xe(gt))) return xe_gt_sriov_vf_bootstrap(gt); - xe_mmio_write32(gt, GDRST, GRDOM_GUC); + xe_mmio_write32(mmio, GDRST, GRDOM_GUC); - ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false); + ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false); if (ret) { xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst); goto err_out; } - guc_status = xe_mmio_read32(gt, GUC_STATUS); + guc_status = xe_mmio_read32(mmio, GUC_STATUS); if (!(guc_status & GS_MIA_IN_RESET)) { xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n", guc_status); @@ -459,6 +467,7 @@ err_out: static void guc_prepare_xfer(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); + struct xe_mmio *mmio = >->mmio; struct xe_device *xe = guc_to_xe(guc); u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC | GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | @@ -473,12 +482,12 @@ static void guc_prepare_xfer(struct xe_guc *guc) shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index); /* Must program this register before loading the ucode with DMA */ - xe_mmio_write32(gt, 
GUC_SHIM_CONTROL, shim_flags); + xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags); - xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE); + xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE); /* Make sure GuC receives ARAT interrupts */ - xe_mmio_rmw32(gt, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0); + xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0); } /* @@ -494,7 +503,7 @@ static int guc_xfer_rsa(struct xe_guc *guc) if (guc->fw.rsa_size > 256) { u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) + xe_uc_fw_rsa_offset(&guc->fw); - xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr); + xe_mmio_write32(>->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr); return 0; } @@ -503,7 +512,7 @@ static int guc_xfer_rsa(struct xe_guc *guc) return -ENOMEM; for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) - xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]); + xe_mmio_write32(>->mmio, UOS_RSA_SCRATCH(i), rsa[i]); return 0; } @@ -583,7 +592,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc) * extreme thermal throttling. And a system that is that hot during boot is probably * dead anyway! */ -#if defined(CONFIG_DRM_XE_DEBUG) +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) #define GUC_LOAD_RETRY_LIMIT 20 #else #define GUC_LOAD_RETRY_LIMIT 3 @@ -593,6 +602,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc) static void guc_wait_ucode(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); + struct xe_mmio *mmio = >->mmio; struct xe_guc_pc *guc_pc = >->uc.guc.pc; ktime_t before, after, delta; int load_done; @@ -619,7 +629,7 @@ static void guc_wait_ucode(struct xe_guc *guc) * timeouts rather than allowing a huge timeout each time. So basically, need * to treat a timeout no different to a value change. */ - ret = xe_mmio_wait32_not(gt, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK, + ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK, last_status, 1000 * 1000, &status, false); if (ret < 0) count++; @@ -657,7 +667,7 @@ static void guc_wait_ucode(struct xe_guc *guc) switch (bootrom) { case XE_BOOTROM_STATUS_NO_KEY_FOUND: xe_gt_err(gt, "invalid key requested, header = 0x%08X\n", - xe_mmio_read32(gt, GUC_HEADER_INFO)); + xe_mmio_read32(mmio, GUC_HEADER_INFO)); break; case XE_BOOTROM_STATUS_RSA_FAILED: @@ -672,7 +682,7 @@ static void guc_wait_ucode(struct xe_guc *guc) switch (ukernel) { case XE_GUC_LOAD_STATUS_EXCEPTION: xe_gt_err(gt, "firmware exception. EIP: %#x\n", - xe_mmio_read32(gt, SOFT_SCRATCH(13))); + xe_mmio_read32(mmio, SOFT_SCRATCH(13))); break; case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID: @@ -824,10 +834,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - msg = xe_mmio_read32(gt, SOFT_SCRATCH(15)); + msg = xe_mmio_read32(>->mmio, SOFT_SCRATCH(15)); msg &= XE_GUC_RECV_MSG_EXCEPTION | XE_GUC_RECV_MSG_CRASH_DUMP_POSTED; - xe_mmio_write32(gt, SOFT_SCRATCH(15), 0); + xe_mmio_write32(>->mmio, SOFT_SCRATCH(15), 0); if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED) xe_gt_err(gt, "Received early GuC crash dump notification!\n"); @@ -844,14 +854,14 @@ static void guc_enable_irq(struct xe_guc *guc) REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); /* Primary GuC and media GuC share a single enable bit */ - xe_mmio_write32(gt, GUC_SG_INTR_ENABLE, + xe_mmio_write32(>->mmio, GUC_SG_INTR_ENABLE, REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST)); /* * There are separate mask bits for primary and media GuCs, so use * a RMW operation to avoid clobbering the other GuC's setting. 
@@ -844,14 +854,14 @@ static void guc_enable_irq(struct xe_guc *guc) REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); /* Primary GuC and media GuC share a single enable bit */ - xe_mmio_write32(gt, GUC_SG_INTR_ENABLE, + xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE, REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST)); /* * There are separate mask bits for primary and media GuCs, so use * a RMW operation to avoid clobbering the other GuC's setting. */ - xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0); + xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0); } int xe_guc_enable_communication(struct xe_guc *guc) @@ -863,7 +873,7 @@ int xe_guc_enable_communication(struct xe_guc *guc) struct xe_gt *gt = guc_to_gt(guc); struct xe_tile *tile = gt_to_tile(gt); - err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); + err = xe_memirq_init_guc(&tile->memirq, guc); if (err) return err; } else { @@ -907,7 +917,7 @@ void xe_guc_notify(struct xe_guc *guc) * additional payload data to the GuC but this capability is not * used by the firmware yet. Use default value in the meantime. */ - xe_mmio_write32(gt, guc->notify_reg, default_notify_data); + xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data); } int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr) @@ -925,6 +935,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, { struct xe_device *xe = guc_to_xe(guc); struct xe_gt *gt = guc_to_gt(guc); + struct xe_mmio *mmio = &gt->mmio; u32 header, reply; struct xe_reg reply_reg = xe_gt_is_media_type(gt) ? MED_VF_SW_FLAG(0) : VF_SW_FLAG(0); @@ -947,19 +958,19 @@ retry: /* Not in critical data-path, just do if else for GT type */ if (xe_gt_is_media_type(gt)) { for (i = 0; i < len; ++i) - xe_mmio_write32(gt, MED_VF_SW_FLAG(i), + xe_mmio_write32(mmio, MED_VF_SW_FLAG(i), request[i]); - xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX)); + xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX)); } else { for (i = 0; i < len; ++i) - xe_mmio_write32(gt, VF_SW_FLAG(i), + xe_mmio_write32(mmio, VF_SW_FLAG(i), request[i]); - xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX)); + xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX)); } xe_guc_notify(guc); - ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN, + ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN, FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC), 50000, &reply, false); if (ret) { @@ -969,7 +980,7 @@ timeout: return ret; } - header = xe_mmio_read32(gt, reply_reg); + header = xe_mmio_read32(mmio, reply_reg); if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) { /* @@ -985,7 +996,7 @@ timeout: BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS); BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1); - ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask, + ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask, 1000000, &header, false); if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != @@ -1032,7 +1043,7 @@ proto: for (i = 1; i < VF_SW_FLAG_COUNT; i++) { reply_reg.addr += sizeof(u32); - response_buf[i] = xe_mmio_read32(gt, reply_reg); + response_buf[i] = xe_mmio_read32(mmio, reply_reg); } }
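Several hunks here also convert xe_force_wake_get() call sites from error-code handling to a reference-mask protocol: the value returned by get is kept and handed back to put. A minimal sketch of the pattern, assuming only the two calls as used in this diff:

static void with_gt_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return; /* nothing was acquired, so there is nothing to put */

	/* ... touch registers that need the GT domain awake ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref); /* release exactly what we got */
}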
@@ -1145,17 +1156,17 @@ int xe_guc_start(struct xe_guc *guc) void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) { struct xe_gt *gt = guc_to_gt(guc); + unsigned int fw_ref; u32 status; - int err; int i; xe_uc_fw_print(&guc->fw, p); - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; - status = xe_mmio_read32(gt, GUC_STATUS); + status = xe_mmio_read32(&gt->mmio, GUC_STATUS); drm_printf(p, "\nGuC status 0x%08x:\n", status); drm_printf(p, "\tBootrom status = 0x%x\n", @@ -1170,12 +1181,15 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) drm_puts(p, "\nScratch registers:\n"); for (i = 0; i < SOFT_SCRATCH_COUNT; i++) { drm_printf(p, "\t%2d: \t0x%x\n", - i, xe_mmio_read32(gt, SOFT_SCRATCH(i))); + i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i))); } - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + drm_puts(p, "\n"); xe_guc_ct_print(&guc->ct, p, false); + + drm_puts(p, "\n"); xe_guc_submit_print(guc, p); } diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index 42116b167c98..58338be44558 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -82,4 +82,9 @@ static inline struct xe_device *guc_to_xe(struct xe_guc *guc) return gt_to_xe(guc_to_gt(guc)); } +static inline struct drm_device *guc_to_drm(struct xe_guc *guc) +{ + return &guc_to_xe(guc)->drm; +} + #endif
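The new guc_to_drm() helper completes the guc -> gt -> xe -> drm conversion chain and lets GuC code use drm-managed allocations without open-coding the walk. A sketch of the intended usage (the wrapper name is hypothetical; the drmm_kzalloc() call matches how xe_guc_capture.c below uses it):

static void *guc_alloc_managed(struct xe_guc *guc, size_t size)
{
	/* Freed automatically when the underlying drm_device is released. */
	return drmm_kzalloc(guc_to_drm(guc), size, GFP_KERNEL);
}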
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index d1902a8581ca..4e746ae98888 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -5,6 +5,8 @@ #include "xe_guc_ads.h" +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> #include <generated/xe_wa_oob.h> @@ -18,6 +20,7 @@ #include "xe_gt_ccs_mode.h" #include "xe_gt_printk.h" #include "xe_guc.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_hw_engine.h" #include "xe_lrc.h" @@ -149,8 +152,7 @@ static u32 guc_ads_waklv_size(struct xe_guc_ads *ads) static size_t guc_ads_capture_size(struct xe_guc_ads *ads) { - /* FIXME: Allocate a proper capture list */ - return PAGE_ALIGN(PAGE_SIZE); + return PAGE_ALIGN(ads->capture_size); } static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads) @@ -357,6 +359,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads) GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE, &offset, &remain); + if (XE_WA(gt, 14022866841)) + guc_waklv_enable_simple(ads, + GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO, + &offset, &remain); + /* * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now, * the default value for this register is determined to be 0xC40. This could change in the @@ -404,6 +411,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads) struct xe_bo *bo; ads->golden_lrc_size = calculate_golden_lrc_size(ads); + ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads)); ads->regset_size = calculate_regset_size(gt); ads->ads_waklv_size = calculate_waklv_size(ads); @@ -418,14 +426,15 @@ int xe_guc_ads_init(struct xe_guc_ads *ads) return 0; } +ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */ /** * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load * @ads: Additional data structures object * - * Recalcuate golden_lrc_size & regset_size as the number hardware engines may - * have changed after the hwconfig was loaded. Also verify the new sizes fit in - * the already allocated ADS buffer object. + * Recalculate golden_lrc_size, capture_size and regset_size as the number + * hardware engines may have changed after the hwconfig was loaded. Also verify + * the new sizes fit in the already allocated ADS buffer object. * * Return: 0 on success, negative error code on error. */ @@ -437,6 +446,8 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads) xe_gt_assert(gt, ads->bo); ads->golden_lrc_size = calculate_golden_lrc_size(ads); + /* Calculate capture size with the worst-case size */ + ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads)); ads->regset_size = calculate_regset_size(gt); xe_gt_assert(gt, ads->golden_lrc_size + @@ -536,20 +547,148 @@ static void guc_mapping_table_init(struct xe_gt *gt, } } -static void guc_capture_list_init(struct xe_guc_ads *ads) +static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map, + enum guc_capture_list_class_type capture_class) +{ + struct xe_device *xe = gt_to_xe(gt); + u32 mask; + + switch (capture_class) { + case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE: + mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]); + mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]); + break; + case GUC_CAPTURE_LIST_CLASS_VIDEO: + mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]); + break; + case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE: + mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]); + break; + case GUC_CAPTURE_LIST_CLASS_BLITTER: + mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]); + break; + case GUC_CAPTURE_LIST_CLASS_GSC_OTHER: + mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]); + break; + default: + mask = 0; + } + + return mask; +} + +static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt, + int owner, int type, int class, u32 *total_size, size_t *size, + void **pptr) +{ + *size = 0; + + if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) { + if (*total_size + *size > ads->capture_size) + xe_gt_dbg(gt, "Capture size overflow: %zu vs %d\n", + *total_size + *size, ads->capture_size); + else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr)) + return false; + } + + return true; +}
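guc_capture_prep_lists() below lays the ADS capture region out as one leading page holding a null (zero-descriptor) list, followed by the real per-owner/type/class lists packed back to back; fused-off engines simply point at the null page. A worked sketch of the offset arithmetic, with illustrative numbers only:

/*
 * Assume ads_ggtt = 0x10000 and capture_offset = 0x4000 (hypothetical):
 *   null_ggtt       = ads_ggtt + capture_offset = 0x14000  (null-list page)
 *   capture_offset += PAGE_SIZE                            (first real list)
 * Every list written afterwards advances capture_offset by its own size,
 * and absent lists are pointed at null_ggtt instead.
 */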
+static int guc_capture_prep_lists(struct xe_guc_ads *ads) { + struct xe_guc *guc = ads_to_guc(ads); + struct xe_gt *gt = ads_to_gt(ads); + u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0; + struct iosys_map info_map; + size_t size = 0; + void *ptr; int i, j; - u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads); - /* FIXME: Populate a proper capture list */ + /* + * GuC Capture's steered reg-list needs to be allocated and initialized + * after the GuC-hwconfig is available, which is guaranteed from here. + */ + xe_guc_capture_steered_list_init(ads_to_guc(ads)); + + capture_offset = guc_ads_capture_offset(ads); + ads_ggtt = xe_bo_ggtt_addr(ads->bo); + info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads), + offsetof(struct __guc_ads_blob, system_info)); + + /* first, set aside the first page for a capture_list with zero descriptors */ + total_size = PAGE_SIZE; + if (!xe_guc_capture_getnullheader(guc, &ptr, &size)) + xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size); + + null_ggtt = ads_ggtt + capture_offset; + capture_offset += PAGE_SIZE; + + /* + * Populate capture list: at this point the ads is already allocated and + * mapped to worst-case size + */ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) { - for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) { - ads_blob_write(ads, ads.capture_instance[i][j], addr); - ads_blob_write(ads, ads.capture_class[i][j], addr); + bool write_empty_list; + + for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) { + u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j); + /* null list if we don't have said engine or list */ + if (!engine_mask) { + ads_blob_write(ads, ads.capture_class[i][j], null_ggtt); + ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt); + continue; + } + + /* engine exists: start with engine-class registers */ + write_empty_list = get_capture_list(ads, guc, gt, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + j, &total_size, &size, &ptr); + if (!write_empty_list) { + ads_blob_write(ads, ads.capture_class[i][j], + ads_ggtt + capture_offset); + xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, + ptr, size); + total_size += size; + capture_offset += size; + } else { + ads_blob_write(ads, ads.capture_class[i][j], null_ggtt); + } + + /* engine exists: next, engine-instance registers */ + write_empty_list = get_capture_list(ads, guc, gt, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE, + j, &total_size, &size, &ptr); + if (!write_empty_list) { + ads_blob_write(ads, ads.capture_instance[i][j], + ads_ggtt + capture_offset); + xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, + ptr, size); + total_size += size; + capture_offset += size; + } else { + ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt); + } } - ads_blob_write(ads, ads.capture_global[i], addr); + /* global registers is last in our PF/VF loops */ + write_empty_list = get_capture_list(ads, guc, gt, i, + GUC_STATE_CAPTURE_TYPE_GLOBAL, + 0, &total_size, &size, &ptr); + if (!write_empty_list) { + ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset); + xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, + size); + total_size += size; + capture_offset += size; + } else { + ads_blob_write(ads, ads.capture_global[i], null_ggtt); + } } + + if (ads->capture_size != PAGE_ALIGN(total_size)) + xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n", + ads->capture_size, PAGE_ALIGN(total_size)); + return PAGE_ALIGN(total_size); } static void guc_mmio_regset_write_one(struct xe_guc_ads *ads, @@ -684,7 +823,7 @@ static void guc_doorbell_init(struct xe_guc_ads *ads) if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) { u32 distdbreg = - xe_mmio_read32(gt, DIST_DBS_POPULATED); + xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED); ads_blob_write(ads, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI], @@ -738,7 +877,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) guc_mmio_reg_state_init(ads); guc_prep_golden_lrc_null(ads); guc_mapping_table_init(gt, &info_map); - 
guc_capture_list_init(ads); + guc_capture_prep_lists(ads); guc_doorbell_init(ads); guc_waklv_init(ads); diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h index 2de5decfe0fd..70c132458ac3 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h @@ -22,6 +22,8 @@ struct xe_guc_ads { u32 regset_size; /** @ads_waklv_size: total waklv size supported by platform */ u32 ads_waklv_size; + /** @capture_size: size of register set passed to GuC for capture */ + u32 capture_size; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c new file mode 100644 index 000000000000..cc72446a5de1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -0,0 +1,1975 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021-2024 Intel Corporation + */ + +#include <linux/types.h> + +#include <drm/drm_managed.h> +#include <drm/drm_print.h> + +#include "abi/guc_actions_abi.h" +#include "abi/guc_capture_abi.h" +#include "abi/guc_log_abi.h" +#include "regs/xe_engine_regs.h" +#include "regs/xe_gt_regs.h" +#include "regs/xe_guc_regs.h" +#include "regs/xe_regs.h" + +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_exec_queue_types.h" +#include "xe_gt.h" +#include "xe_gt_mcr.h" +#include "xe_gt_printk.h" +#include "xe_guc.h" +#include "xe_guc_ads.h" +#include "xe_guc_capture.h" +#include "xe_guc_capture_types.h" +#include "xe_guc_ct.h" +#include "xe_guc_exec_queue_types.h" +#include "xe_guc_log.h" +#include "xe_guc_submit_types.h" +#include "xe_guc_submit.h" +#include "xe_hw_engine_types.h" +#include "xe_hw_engine.h" +#include "xe_lrc.h" +#include "xe_macros.h" +#include "xe_map.h" +#include "xe_mmio.h" +#include "xe_sched_job.h" + +/* + * struct __guc_capture_bufstate + * + * Book-keeping structure used to track read and write pointers + * as we extract error capture data from the GuC-log-buffer's + * error-capture region as a stream of dwords. + */ +struct __guc_capture_bufstate { + u32 size; + u32 data_offset; + u32 rd; + u32 wr; +}; + +/* + * struct __guc_capture_parsed_output - extracted error capture node + * + * A single unit of extracted error-capture output data grouped together + * at an engine-instance level. We keep these nodes in a linked list. + * See cachelist and outlist below. + */ +struct __guc_capture_parsed_output { + /* + * A single set of 3 capture lists: a global-list + * an engine-class-list and an engine-instance list. 
+ * outlist in __guc_capture_parsed_output will keep + * a linked list of these nodes that will eventually + * be detached from outlist and attached to + * xe_devcoredump in response to a context reset + */ + struct list_head link; + bool is_partial; + u32 eng_class; + u32 eng_inst; + u32 guc_id; + u32 lrca; + u32 type; + bool locked; + enum xe_hw_engine_snapshot_source_id source; + struct gcap_reg_list_info { + u32 vfid; + u32 num_regs; + struct guc_mmio_reg *regs; + } reginfo[GUC_STATE_CAPTURE_TYPE_MAX]; +#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_STATE_CAPTURE_TYPE_GLOBAL) +#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS) +}; + +/* + * Define all device tables of GuC error capture register lists + * NOTE: + * For engine-registers, GuC only needs the register offsets + * from the engine-mmio-base + * + * 64 bit registers need 2 entries for the low 32 bit register and the high 32 bit + * register, for example: + * Register data_type flags mask Register name + * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, + * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"}, + * 1. data_type: Indicates the hi/low 32 bit half of a 64 bit register. + * A 64 bit register define requires 2 consecutive entries, + * with the low dword first and the hi dword second. + * 2. Register name: null for an incomplete define + */ +#define COMMON_XELP_BASE_GLOBAL \ + { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"} + +#define COMMON_BASE_ENGINE_INSTANCE \ + { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \ + { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \ + { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \ + { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \ + { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \ + { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \ + { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \ + { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \ + { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \ + { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \ + { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \ + { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \ + { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \ + { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \ + { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \ + { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \ + { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \ + { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \ + { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \ + { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ + { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"} + +#define COMMON_XELP_RC_CLASS \ + { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"} + +#define COMMON_XELP_RC_CLASS_INSTDONE \ + { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \ + { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \ + { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"} + +#define XELP_VEC_CLASS_REGS \ + { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \ + { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \ + { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \ + { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"} + +/* XE_LP Global */ +static const struct 
__guc_mmio_reg_descr xe_lp_global_regs[] = { + COMMON_XELP_BASE_GLOBAL, +}; + +/* Render / Compute Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_rc_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* Render / Compute Engine-Class */ +static const struct __guc_mmio_reg_descr xe_rc_class_regs[] = { + COMMON_XELP_RC_CLASS, + COMMON_XELP_RC_CLASS_INSTDONE, +}; + +/* Render / Compute Engine-Class for xehpg */ +static const struct __guc_mmio_reg_descr xe_hpg_rc_class_regs[] = { + COMMON_XELP_RC_CLASS, +}; + +/* Media Decode/Encode Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_vd_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* Video Enhancement Engine-Class */ +static const struct __guc_mmio_reg_descr xe_vec_class_regs[] = { + XELP_VEC_CLASS_REGS, +}; + +/* Video Enhancement Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_vec_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* Blitter Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_blt_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* XE_LP - GSC Per-Engine-Instance */ +static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = { + COMMON_BASE_ENGINE_INSTANCE, +}; + +/* + * Empty list to prevent warnings about unknown class/instance types + * as not all class/instance types have entries on all platforms. + */ +static const struct __guc_mmio_reg_descr empty_regs_list[] = { +}; + +#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x) +#define TO_GCAP_DEF_TYPE(x) (GUC_STATE_CAPTURE_TYPE_##x) +#define MAKE_REGLIST(regslist, regsowner, regstype, class) \ + { \ + regslist, \ + ARRAY_SIZE(regslist), \ + TO_GCAP_DEF_OWNER(regsowner), \ + TO_GCAP_DEF_TYPE(regstype), \ + class \ + } + +/* List of lists for legacy graphic product version < 1255 */ +static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = { + MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0), + MAKE_REGLIST(xe_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + {} +}; + + /* List of lists for graphic product version >= 1255 */ +static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = { + MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0), + MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO), + MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), + 
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER), + MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), + {} +}; + +static const char * const capture_list_type_names[] = { + "Global", + "Class", + "Instance", +}; + +static const char * const capture_engine_class_names[] = { + "Render/Compute", + "Video", + "VideoEnhance", + "Blitter", + "GSC-Other", +}; + +struct __guc_capture_ads_cache { + bool is_valid; + void *ptr; + size_t size; + int status; +}; + +struct xe_guc_state_capture { + const struct __guc_mmio_reg_descr_group *reglists; + /** + * NOTE: steered registers have multiple instances depending on the HW configuration + * (slices or dual-sub-slices) and thus depends on HW fuses discovered + */ + struct __guc_mmio_reg_descr_group *extlists; + struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX] + [GUC_STATE_CAPTURE_TYPE_MAX] + [GUC_CAPTURE_LIST_CLASS_MAX]; + void *ads_null_cache; + struct list_head cachelist; +#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS) +#define PREALLOC_NODES_DEFAULT_NUMREGS 64 + + int max_mmio_per_node; + struct list_head outlist; +}; + +static void +guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node); + +static const struct __guc_mmio_reg_descr_group * +guc_capture_get_device_reglist(struct xe_device *xe) +{ + if (GRAPHICS_VERx100(xe) >= 1255) + return xe_hpg_lists; + else + return xe_lp_lists; +} + +static const struct __guc_mmio_reg_descr_group * +guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists, + u32 owner, u32 type, enum guc_capture_list_class_type capture_class) +{ + int i; + + if (!reglists) + return NULL; + + for (i = 0; reglists[i].list; ++i) { + if (reglists[i].owner == owner && reglists[i].type == type && + (reglists[i].engine == capture_class || + reglists[i].type == GUC_STATE_CAPTURE_TYPE_GLOBAL)) + return &reglists[i]; + } + + return NULL; +} + +const struct __guc_mmio_reg_descr_group * +xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, bool is_ext) +{ + const struct __guc_mmio_reg_descr_group *reglists; + + if (is_ext) { + struct xe_guc *guc = &gt->uc.guc; + + reglists = guc->capture->extlists; + } else { + reglists = guc_capture_get_device_reglist(gt_to_xe(gt)); + } + return guc_capture_get_one_list(reglists, owner, type, capture_class); +} + +struct __ext_steer_reg { + const char *name; + struct xe_reg_mcr reg; +}; + +static const struct __ext_steer_reg xe_extregs[] = { + {"SAMPLER_INSTDONE", SAMPLER_INSTDONE}, + {"ROW_INSTDONE", ROW_INSTDONE} +}; + +static const struct __ext_steer_reg xehpg_extregs[] = { + {"SC_INSTDONE", XEHPG_SC_INSTDONE}, + {"SC_INSTDONE_EXTRA", XEHPG_SC_INSTDONE_EXTRA}, + {"SC_INSTDONE_EXTRA2", XEHPG_SC_INSTDONE_EXTRA2}, + {"INSTDONE_GEOM_SVGUNIT", XEHPG_INSTDONE_GEOM_SVGUNIT} +};
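__fill_ext_reg() below encodes the steering target directly into each descriptor's flags so that whoever replays the list can steer the read to the right slice/subslice. A hedged sketch of the round trip using the FIELD_PREP()/FIELD_GET() masks from this file (the decode side mirrors read_reg_to_node() further down):

u32 flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1) |
	    FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id) |
	    FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);

if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, flags)) {
	int group = FIELD_GET(GUC_REGSET_STEERING_GROUP, flags);
	int instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, flags);

	/* steered read, as done at capture time:
	 * value = xe_gt_mcr_unicast_read(gt, XE_REG_MCR(addr), group, instance);
	 */
}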
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, + const struct __ext_steer_reg *extlist, + int slice_id, int subslice_id) +{ + if (!ext || !extlist) + return; + + ext->reg = XE_REG(extlist->reg.__reg.addr); + ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); + ext->regname = extlist->name; +} + +static int +__alloc_ext_regs(struct drm_device *drm, struct __guc_mmio_reg_descr_group *newlist, + const struct __guc_mmio_reg_descr_group *rootlist, int num_regs) +{ + struct __guc_mmio_reg_descr *list; + + list = drmm_kzalloc(drm, num_regs * sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL); + if (!list) + return -ENOMEM; + + newlist->list = list; + newlist->num_regs = num_regs; + newlist->owner = rootlist->owner; + newlist->engine = rootlist->engine; + newlist->type = rootlist->type; + + return 0; +} + +static int guc_capture_get_steer_reg_num(struct xe_device *xe) +{ + int num = ARRAY_SIZE(xe_extregs); + + if (GRAPHICS_VERx100(xe) >= 1255) + num += ARRAY_SIZE(xehpg_extregs); + + return num; +} + +static void guc_capture_alloc_steered_lists(struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + u16 slice, subslice; + int iter, i, total = 0; + const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists; + const struct __guc_mmio_reg_descr_group *list; + struct __guc_mmio_reg_descr_group *extlists; + struct __guc_mmio_reg_descr *extarray; + bool has_xehpg_extregs = GRAPHICS_VERx100(gt_to_xe(gt)) >= 1255; + struct drm_device *drm = &gt_to_xe(gt)->drm; + bool has_rcs_ccs = false; + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + + /* + * If the GT has no rcs/ccs, there is no need to alloc a steered list. + * Currently, only rcs/ccs have steering registers; if, in the future, + * other engine types gain steering registers, this condition check needs + * to be extended + */ + for_each_hw_engine(hwe, gt, id) { + if (xe_engine_class_to_guc_capture_class(hwe->class) == + GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) { + has_rcs_ccs = true; + break; + } + } + + if (!has_rcs_ccs) + return; + + /* steered registers currently only exist for the render-class */ + list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE); + /* + * Skip if this platform has no engine class registers or if extlists + * was previously allocated + */ + if (!list || guc->capture->extlists) + return; + + total = bitmap_weight(gt->fuse_topo.g_dss_mask, sizeof(gt->fuse_topo.g_dss_mask) * 8) * + guc_capture_get_steer_reg_num(guc_to_xe(guc)); + + if (!total) + return; + + /* allocate an extra for an end marker */ + extlists = drmm_kzalloc(drm, 2 * sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL); + if (!extlists) + return; + + if (__alloc_ext_regs(drm, &extlists[0], list, total)) { + drmm_kfree(drm, extlists); + return; + } + + /* For steering registers, the list is generated at run-time */ + extarray = (struct __guc_mmio_reg_descr *)extlists[0].list; + for_each_dss_steering(iter, gt, slice, subslice) { + for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) { + __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice); + ++extarray; + } + + if (has_xehpg_extregs) + for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) { + __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice); + ++extarray; + } + } + + extlists[0].num_regs = total; + + xe_gt_dbg(guc_to_gt(guc), "capture found %d ext-regs.\n", total); + guc->capture->extlists = extlists; +} + +static int +guc_capture_list_init(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, struct guc_mmio_reg *ptr, + u16 num_entries) +{ + u32 ptr_idx = 0, list_idx = 0; + const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; + struct __guc_mmio_reg_descr_group 
*extlists = guc->capture->extlists; + const struct __guc_mmio_reg_descr_group *match; + u32 list_num; + + if (!reglists) + return -ENODEV; + + match = guc_capture_get_one_list(reglists, owner, type, capture_class); + if (!match) + return -ENODATA; + + list_num = match->num_regs; + for (list_idx = 0; ptr_idx < num_entries && list_idx < list_num; ++list_idx, ++ptr_idx) { + ptr[ptr_idx].offset = match->list[list_idx].reg.addr; + ptr[ptr_idx].value = 0xDEADF00D; + ptr[ptr_idx].flags = match->list[list_idx].flags; + ptr[ptr_idx].mask = match->list[list_idx].mask; + } + + match = guc_capture_get_one_list(extlists, owner, type, capture_class); + if (match) + for (ptr_idx = list_num, list_idx = 0; + ptr_idx < num_entries && list_idx < match->num_regs; + ++ptr_idx, ++list_idx) { + ptr[ptr_idx].offset = match->list[list_idx].reg.addr; + ptr[ptr_idx].value = 0xDEADF00D; + ptr[ptr_idx].flags = match->list[list_idx].flags; + ptr[ptr_idx].mask = match->list[list_idx].mask; + } + + if (ptr_idx < num_entries) + xe_gt_dbg(guc_to_gt(guc), "Got short capture reglist init: %d out-of %d.\n", + ptr_idx, num_entries); + + return 0; +} + +static int +guc_cap_list_num_regs(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class) +{ + const struct __guc_mmio_reg_descr_group *match; + int num_regs = 0; + + match = guc_capture_get_one_list(guc->capture->reglists, owner, type, capture_class); + if (match) + num_regs = match->num_regs; + + match = guc_capture_get_one_list(guc->capture->extlists, owner, type, capture_class); + if (match) + num_regs += match->num_regs; + else + /* + * If a caller wants the full register dump size but we have + * not yet got the hw-config, which is before max_mmio_per_node + * is initialized, then provide a worst-case number for + * extlists based on max dss fuse bits, but only ever for + * render/compute + */ + if (owner == GUC_CAPTURE_LIST_INDEX_PF && + type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS && + capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE && + !guc->capture->max_mmio_per_node) + num_regs += guc_capture_get_steer_reg_num(guc_to_xe(guc)) * + XE_MAX_DSS_FUSE_BITS; + + return num_regs; +} + +static int +guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, + size_t *size, bool is_purpose_est) +{ + struct xe_guc_state_capture *gc = guc->capture; + struct xe_gt *gt = guc_to_gt(guc); + struct __guc_capture_ads_cache *cache; + int num_regs; + + xe_gt_assert(gt, type < GUC_STATE_CAPTURE_TYPE_MAX); + xe_gt_assert(gt, capture_class < GUC_CAPTURE_LIST_CLASS_MAX); + + cache = &gc->ads_cache[owner][type][capture_class]; + if (!gc->reglists) { + xe_gt_warn(gt, "No capture reglist for this device\n"); + return -ENODEV; + } + + if (cache->is_valid) { + *size = cache->size; + return cache->status; + } + + if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF && + !guc_capture_get_one_list(gc->reglists, owner, type, capture_class)) { + if (type == GUC_STATE_CAPTURE_TYPE_GLOBAL) + xe_gt_warn(gt, "Missing capture reglist: global!\n"); + else + xe_gt_warn(gt, "Missing capture reglist: %s(%u):%s(%u)!\n", + capture_list_type_names[type], type, + capture_engine_class_names[capture_class], capture_class); + return -ENODEV; + } + + num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class); + /* intentional empty lists can exist depending on hw config */ + if (!num_regs) + return -ENODATA; + + if (size) + *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) + + (num_regs * sizeof(struct 
guc_mmio_reg))); + + return 0; +} + +/** + * xe_guc_capture_getlistsize - Get list size for owner/type/class combination + * @guc: The GuC object + * @owner: PF/VF owner + * @type: GuC capture register type + * @capture_class: GuC capture engine class id + * @size: Point to the size + * + * This function will get the list for the owner/type/class combination, and + * return the page aligned list size. + * + * Returns: 0 on success or a negative error code on failure. + */ +int +xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, size_t *size) +{ + return guc_capture_getlistsize(guc, owner, type, capture_class, size, false); +} + +/** + * xe_guc_capture_getlist - Get register capture list for owner/type/class + * combination + * @guc: The GuC object + * @owner: PF/VF owner + * @type: GuC capture register type + * @capture_class: GuC capture engine class id + * @outptr: Point to cached register capture list + * + * This function will get the register capture list for the owner/type/class + * combination. + * + * Returns: 0 on success or a negative error code on failure. + */ +int +xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, void **outptr) +{ + struct xe_guc_state_capture *gc = guc->capture; + struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][capture_class]; + struct guc_debug_capture_list *listnode; + int ret, num_regs; + u8 *caplist, *tmp; + size_t size = 0; + + if (!gc->reglists) + return -ENODEV; + + if (cache->is_valid) { + *outptr = cache->ptr; + return cache->status; + } + + ret = xe_guc_capture_getlistsize(guc, owner, type, capture_class, &size); + if (ret) { + cache->is_valid = true; + cache->ptr = NULL; + cache->size = 0; + cache->status = ret; + return ret; + } + + caplist = drmm_kzalloc(guc_to_drm(guc), size, GFP_KERNEL); + if (!caplist) + return -ENOMEM; + + /* populate capture list header */ + tmp = caplist; + num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class); + listnode = (struct guc_debug_capture_list *)tmp; + listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs); + + /* populate list of register descriptor */ + tmp += sizeof(struct guc_debug_capture_list); + guc_capture_list_init(guc, owner, type, capture_class, + (struct guc_mmio_reg *)tmp, num_regs); + + /* cache this list */ + cache->is_valid = true; + cache->ptr = caplist; + cache->size = size; + cache->status = 0; + + *outptr = caplist; + + return 0; +} + +/** + * xe_guc_capture_getnullheader - Get a null list for register capture + * @guc: The GuC object + * @outptr: Point to cached register capture list + * @size: Point to the size + * + * This function will alloc for a null list for register capture. + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +int +xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size) +{ + struct xe_guc_state_capture *gc = guc->capture; + int tmp = sizeof(u32) * 4; + void *null_header; + + if (gc->ads_null_cache) { + *outptr = gc->ads_null_cache; + *size = tmp; + return 0; + } + + null_header = drmm_kzalloc(guc_to_drm(guc), tmp, GFP_KERNEL); + if (!null_header) + return -ENOMEM; + + gc->ads_null_cache = null_header; + *outptr = null_header; + *size = tmp; + + return 0; +} + +/** + * xe_guc_capture_ads_input_worst_size - Calculate the worst size for GuC register capture + * @guc: point to xe_guc structure + * + * Calculate the worst size for GuC register capture by including all possible engines classes. + * + * Returns: Calculated size + */ +size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc) +{ + size_t total_size, class_size, instance_size, global_size; + int i, j; + + /* + * This function calculates the worst case register lists size by + * including all possible engines classes. It is called during the + * first of a two-phase GuC (and ADS-population) initialization + * sequence, that is, during the pre-hwconfig phase before we have + * the exact engine fusing info. + */ + total_size = PAGE_SIZE; /* Pad a page in front for empty lists */ + for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) { + for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) { + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + j, &class_size) < 0) + class_size = 0; + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE, + j, &instance_size) < 0) + instance_size = 0; + total_size += class_size + instance_size; + } + if (xe_guc_capture_getlistsize(guc, i, + GUC_STATE_CAPTURE_TYPE_GLOBAL, + 0, &global_size) < 0) + global_size = 0; + total_size += global_size; + } + + return PAGE_ALIGN(total_size); +} + +static int guc_capture_output_size_est(struct xe_guc *guc) +{ + struct xe_gt *gt = guc_to_gt(guc); + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + + int capture_size = 0; + size_t tmp = 0; + + if (!guc->capture) + return -ENODEV; + + /* + * If every single engine-instance suffered a failure in quick succession but + * were all unrelated, then a burst of multiple error-capture events would dump + * registers for every one engine instance, one at a time. In this case, GuC + * would even dump the global-registers repeatedly. + * + * For each engine instance, there would be 1 x guc_state_capture_group_t output + * followed by 3 x guc_state_capture_t lists. The latter is how the register + * dumps are split across different register types (where the '3' are global vs class + * vs instance). 
+ */ + for_each_hw_engine(hwe, gt, id) { + enum guc_capture_list_class_type capture_class; + + capture_class = xe_engine_class_to_guc_capture_class(hwe->class); + capture_size += sizeof(struct guc_state_capture_group_header_t) + + (3 * sizeof(struct guc_state_capture_header_t)); + + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_GLOBAL, + 0, &tmp, true)) + capture_size += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS, + capture_class, &tmp, true)) + capture_size += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE, + capture_class, &tmp, true)) + capture_size += tmp; + } + + return capture_size; +} + +/* + * Add on a 3x multiplier to allow for multiple back-to-back captures occurring + * before the Xe can read the data out and process it + */ +#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3 + +static void check_guc_capture_size(struct xe_guc *guc) +{ + int capture_size = guc_capture_output_size_est(guc); + int spare_size = capture_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER; + u32 buffer_size = xe_guc_log_section_size_capture(&guc->log); + + /* + * NOTE: capture_size is much smaller than the capture region + * allocation (DG2: <80K vs 1MB). + * Additionally, it's based on the space needed to fit all engines getting + * reset at once within the same G2H handler task slot. This is very + * unlikely. However, if GuC really does run out of space for whatever + * reason, we will see a separate warning message when processing the + * G2H event capture-notification, search for: + * xe_guc_STATE_CAPTURE_EVENT_STATUS_NOSPACE. + */ + if (capture_size < 0) + xe_gt_dbg(guc_to_gt(guc), + "Failed to calculate error state capture buffer minimum size: %d!\n", + capture_size); + if (capture_size > buffer_size) + xe_gt_dbg(guc_to_gt(guc), "Error state capture buffer may be too small: %d < %d\n", + buffer_size, capture_size); + else if (spare_size > buffer_size) + xe_gt_dbg(guc_to_gt(guc), + "Error state capture buffer lacks spare size: %d < %d (min = %d)\n", + buffer_size, spare_size, capture_size); +} + +static void +guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node, + struct list_head *list) +{ + list_add(&node->link, list); +} + +static void +guc_capture_add_node_to_outlist(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_remove_stale_matches_from_list(gc, node); + guc_capture_add_node_to_list(node, &gc->outlist); +} + +static void +guc_capture_add_node_to_cachelist(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + guc_capture_add_node_to_list(node, &gc->cachelist); +} + +static void +guc_capture_free_outlist_node(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *n) +{ + if (n) { + n->locked = 0; + list_del(&n->link); + /* put node back to cache list */ + guc_capture_add_node_to_cachelist(gc, n); + } +} + +static void +guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc, + struct __guc_capture_parsed_output *node) +{ + struct __guc_capture_parsed_output *n, *ntmp; + int guc_id = node->guc_id; + + list_for_each_entry_safe(n, ntmp, &gc->outlist, link) { + if (n != node && !n->locked && n->guc_id == guc_id) + guc_capture_free_outlist_node(gc, n); + } +} + +static void +guc_capture_init_node(struct xe_guc *guc, struct __guc_capture_parsed_output *node) +{ + struct guc_mmio_reg *tmp[GUC_STATE_CAPTURE_TYPE_MAX]; + int i; + + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) * + guc->capture->max_mmio_per_node); + } + memset(node, 0, sizeof(*node)); + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) + node->reginfo[i].regs = tmp[i]; + + INIT_LIST_HEAD(&node->link); +} + +/** + * DOC: Init, G2H-event and reporting flows for GuC-error-capture + * + * KMD Init time flows: + * -------------------- + * --> alloc A: GuC input capture regs lists (registered to GuC via ADS). + * xe_guc_ads acquires the register lists by calling + * xe_guc_capture_getlistsize and xe_guc_capture_getlist 'n' times, + * where n = 1 for global-reg-list + + * num_engine_classes for class-reg-list + + * num_engine_classes for instance-reg-list + * (since all instances of the same engine-class type + * have an identical engine-instance register-list). + * ADS module also calls separately for PF vs VF. + * + * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param)) + * Size = #define CAPTURE_BUFFER_SIZE (warns if on too-small) + * Note2: 'x 3' to hold multiple capture groups + * + * GUC Runtime notify capture: + * -------------------------- + * --> G2H STATE_CAPTURE_NOTIFICATION + * L--> xe_guc_capture_process + * L--> Loop through B (head..tail) and for each engine instance's + * err-state-captured register-list we find, we alloc 'C': + * --> alloc C: A capture-output-node structure that includes misc capture info along + * with 3 register list dumps (global, engine-class and engine-instance) + * This node is created from a pre-allocated list of blank nodes in + * guc->capture->cachelist and populated with the error-capture + * data from GuC and then it's added into guc->capture->outlist linked + * list. This list is used for matchup and printout by xe_devcoredump_read + * and xe_engine_snapshot_print, (when user invokes the devcoredump sysfs). + * + * GUC --> notify context reset: + * ----------------------------- + * --> guc_exec_queue_timedout_job + * L--> xe_devcoredump + * L--> devcoredump_snapshot + * --> xe_hw_engine_snapshot_capture + * --> xe_engine_manual_capture(For manual capture) + * + * User Sysfs / Debugfs + * -------------------- + * --> xe_devcoredump_read-> + * L--> xxx_snapshot_print + * L--> xe_engine_snapshot_print + * Print register lists values saved at + * guc->capture->outlist + * + */ + +static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf) +{ + if (buf->wr >= buf->rd) + return (buf->wr - buf->rd); + return (buf->size - buf->rd) + buf->wr; +} + +static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf) +{ + if (buf->rd > buf->wr) + return (buf->size - buf->rd); + return (buf->wr - buf->rd); +} + +/* + * GuC's error-capture output is a ring buffer populated in a byte-stream fashion: + * + * The GuC Log buffer region for error-capture is managed like a ring buffer. + * The GuC firmware dumps error capture logs into this ring in a byte-stream flow. + * Additionally, as per the current and foreseeable future, all packed error- + * capture output structures are dword aligned. + * + * That said, if the GuC firmware is in the midst of writing a structure that is larger + * than one dword but the tail end of the err-capture buffer-region has lesser space left, + * we would need to extract that structure one dword at a time straddled across the end, + * onto the start of the ring. + * + * Below function, guc_capture_log_remove_bytes is a helper for that. 
All callers of this + * function would typically do a straight-up memcpy from the ring contents and will only + * call this helper if their structure-extraction is straddling across the end of the + * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease + * scalability for future expansion of output data types without requiring a redesign + * of the flow controls. + */ +static int +guc_capture_log_remove_bytes(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + void *out, int bytes_needed) +{ +#define GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX 3 + + int fill_size = 0, tries = GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX; + int copy_size, avail; + + xe_assert(guc_to_xe(guc), bytes_needed % sizeof(u32) == 0); + + if (bytes_needed > guc_capture_buf_cnt(buf)) + return -1; + + while (bytes_needed > 0 && tries--) { + int misaligned; + + avail = guc_capture_buf_cnt_to_end(buf); + misaligned = avail % sizeof(u32); + /* wrap if at end */ + if (!avail) { + /* output stream clipped */ + if (!buf->rd) + return fill_size; + buf->rd = 0; + continue; + } + + /* Only copy to u32 aligned data */ + copy_size = avail < bytes_needed ? avail - misaligned : bytes_needed; + xe_map_memcpy_from(guc_to_xe(guc), out + fill_size, &guc->log.bo->vmap, + buf->data_offset + buf->rd, copy_size); + buf->rd += copy_size; + fill_size += copy_size; + bytes_needed -= copy_size; + + if (misaligned) + xe_gt_warn(guc_to_gt(guc), + "Bytes extraction not dword aligned, clipping.\n"); + } + + return fill_size; +} + +static int +guc_capture_log_get_group_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_group_header_t *ghdr) +{ + int fullsize = sizeof(struct guc_state_capture_group_header_t); + + if (guc_capture_log_remove_bytes(guc, buf, ghdr, fullsize) != fullsize) + return -1; + return 0; +} + +static int +guc_capture_log_get_data_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_state_capture_header_t *hdr) +{ + int fullsize = sizeof(struct guc_state_capture_header_t); + + if (guc_capture_log_remove_bytes(guc, buf, hdr, fullsize) != fullsize) + return -1; + return 0; +} + +static int +guc_capture_log_get_register(struct xe_guc *guc, struct __guc_capture_bufstate *buf, + struct guc_mmio_reg *reg) +{ + int fullsize = sizeof(struct guc_mmio_reg); + + if (guc_capture_log_remove_bytes(guc, buf, reg, fullsize) != fullsize) + return -1; + return 0; +} + +static struct __guc_capture_parsed_output * +guc_capture_get_prealloc_node(struct xe_guc *guc) +{ + struct __guc_capture_parsed_output *found = NULL; + + if (!list_empty(&guc->capture->cachelist)) { + struct __guc_capture_parsed_output *n, *ntmp; + + /* get first avail node from the cache list */ + list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) { + found = n; + break; + } + } else { + struct __guc_capture_parsed_output *n, *ntmp; + + /* + * traverse reversed and steal back the oldest node already + * allocated + */ + list_for_each_entry_safe_reverse(n, ntmp, &guc->capture->outlist, link) { + if (!n->locked) + found = n; + } + } + if (found) { + list_del(&found->link); + guc_capture_init_node(guc, found); + } + + return found; +} + +static struct __guc_capture_parsed_output * +guc_capture_clone_node(struct xe_guc *guc, struct __guc_capture_parsed_output *original, + u32 keep_reglist_mask) +{ + struct __guc_capture_parsed_output *new; + int i; + + new = guc_capture_get_prealloc_node(guc); + if (!new) + return NULL; + if (!original) + return new; + + new->is_partial = original->is_partial; + 
+ /* copy reg-lists that we want to clone */ + for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + if (keep_reglist_mask & BIT(i)) { + XE_WARN_ON(original->reginfo[i].num_regs > + guc->capture->max_mmio_per_node); + + memcpy(new->reginfo[i].regs, original->reginfo[i].regs, + original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg)); + + new->reginfo[i].num_regs = original->reginfo[i].num_regs; + new->reginfo[i].vfid = original->reginfo[i].vfid; + + if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS) { + new->eng_class = original->eng_class; + } else if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { + new->eng_inst = original->eng_inst; + new->guc_id = original->guc_id; + new->lrca = original->lrca; + } + } + } + + return new; +} + +static int +guc_capture_extract_reglists(struct xe_guc *guc, struct __guc_capture_bufstate *buf) +{ + struct xe_gt *gt = guc_to_gt(guc); + struct guc_state_capture_group_header_t ghdr = {0}; + struct guc_state_capture_header_t hdr = {0}; + struct __guc_capture_parsed_output *node = NULL; + struct guc_mmio_reg *regs = NULL; + int i, numlists, numregs, ret = 0; + enum guc_state_capture_type datatype; + struct guc_mmio_reg tmp; + bool is_partial = false; + + i = guc_capture_buf_cnt(buf); + if (!i) + return -ENODATA; + + if (i % sizeof(u32)) { + xe_gt_warn(gt, "Got mis-aligned register capture entries\n"); + ret = -EIO; + goto bailout; + } + + /* first get the capture group header */ + if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) { + ret = -EIO; + goto bailout; + } + /* + * we would typically expect a layout as below where n would be expected to be + * anywhere between 3 to n where n > 3 if we are seeing multiple dependent engine + * instances being reset together. + * ____________________________________________ + * | Capture Group | + * | ________________________________________ | + * | | Capture Group Header: | | + * | | - num_captures = 5 | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture1: | | + * | | Hdr: GLOBAL, numregs=a | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... rega | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture2: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regb | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture3: | | + * | | Hdr: INSTANCE=RCS, numregs=c | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regc | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture4: | | + * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... regd | | | + * | | |__________________________________| | | + * | |______________________________________| | + * | ________________________________________ | + * | | Capture5: | | + * | | Hdr: INSTANCE=CCS0, numregs=e | | + * | | ____________________________________ | | + * | | | Reglist | | | + * | | | - reg1, reg2, ... 
rege | | | + * | |__________________________________| | | + * | |______________________________________| | + * |__________________________________________| + */ + is_partial = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE, ghdr.info); + numlists = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES, ghdr.info); + + while (numlists--) { + if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) { + ret = -EIO; + break; + } + + datatype = FIELD_GET(GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE, hdr.info); + if (datatype > GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) { + /* unknown capture type - skip over to next capture set */ + numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES, + hdr.num_mmio_entries); + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, &tmp)) { + ret = -EIO; + break; + } + } + continue; + } else if (node) { + /* + * Based on the current capture type and what we have so far, + * decide if we should add the current node into the internal + * linked list for match-up when xe_devcoredump calls later + * (and alloc a blank node for the next set of reglists) + * or continue with the same node or clone the current node + * but only retain the global or class registers (such as the + * case of dependent engine resets). + */ + if (datatype == GUC_STATE_CAPTURE_TYPE_GLOBAL) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS && + node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) { + /* Add to list, clone node and duplicate global list */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + GCAP_PARSED_REGLIST_INDEX_GLOBAL); + } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE && + node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) { + /* Add to list, clone node and duplicate global + class lists */ + guc_capture_add_node_to_outlist(guc->capture, node); + node = guc_capture_clone_node(guc, node, + (GCAP_PARSED_REGLIST_INDEX_GLOBAL | + GCAP_PARSED_REGLIST_INDEX_ENGCLASS)); + } + } + + if (!node) { + node = guc_capture_get_prealloc_node(guc); + if (!node) { + ret = -ENOMEM; + break; + } + if (datatype != GUC_STATE_CAPTURE_TYPE_GLOBAL) + xe_gt_dbg(gt, "Register capture missing global dump: %08x!\n", + datatype); + } + node->is_partial = is_partial; + node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner); + node->source = XE_ENGINE_CAPTURE_SOURCE_GUC; + node->type = datatype; + + switch (datatype) { + case GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE: + node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, + hdr.info); + node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE, + hdr.info); + node->lrca = hdr.lrca; + node->guc_id = hdr.guc_id; + break; + case GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS: + node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, + hdr.info); + break; + default: + break; + } + + numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES, + hdr.num_mmio_entries); + if (numregs > guc->capture->max_mmio_per_node) { + xe_gt_dbg(gt, "Register capture list extraction clipped by prealloc!\n"); + numregs = guc->capture->max_mmio_per_node; + } + node->reginfo[datatype].num_regs = numregs; + regs = node->reginfo[datatype].regs; + i = 0; + while (numregs--) { + if (guc_capture_log_get_register(guc, buf, &regs[i++])) { + ret = -EIO; + break; + } + } + } + +bailout: + if (node) { + /* If we have data, add to linked list for match-up 
when xe_devcoredump calls */ + for (i = GUC_STATE_CAPTURE_TYPE_GLOBAL; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) { + if (node->reginfo[i].regs) { + guc_capture_add_node_to_outlist(guc->capture, node); + node = NULL; + break; + } + } + if (node) /* else return it back to cache list */ + guc_capture_add_node_to_cachelist(guc->capture, node); + } + return ret; +} + +static int __guc_capture_flushlog_complete(struct xe_guc *guc) +{ + u32 action[] = { + XE_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, + GUC_LOG_BUFFER_CAPTURE + }; + + return xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action)); +} + +static void __guc_capture_process_output(struct xe_guc *guc) +{ + unsigned int buffer_size, read_offset, write_offset, full_count; + struct xe_uc *uc = container_of(guc, typeof(*uc), guc); + struct guc_log_buffer_state log_buf_state_local; + struct __guc_capture_bufstate buf; + bool new_overflow; + int ret, tmp; + u32 log_buf_state_offset; + u32 src_data_offset; + + log_buf_state_offset = sizeof(struct guc_log_buffer_state) * GUC_LOG_BUFFER_CAPTURE; + src_data_offset = xe_guc_get_log_buffer_offset(&guc->log, GUC_LOG_BUFFER_CAPTURE); + + /* + * Make a copy of the state structure, inside GuC log buffer + * (which is uncached mapped), on the stack to avoid reading + * from it multiple times. + */ + xe_map_memcpy_from(guc_to_xe(guc), &log_buf_state_local, &guc->log.bo->vmap, + log_buf_state_offset, sizeof(struct guc_log_buffer_state)); + + buffer_size = xe_guc_get_log_buffer_size(&guc->log, GUC_LOG_BUFFER_CAPTURE); + read_offset = log_buf_state_local.read_ptr; + write_offset = log_buf_state_local.sampled_write_ptr; + full_count = FIELD_GET(GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT, log_buf_state_local.flags); + + /* Bookkeeping stuff */ + tmp = FIELD_GET(GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE, log_buf_state_local.flags); + guc->log.stats[GUC_LOG_BUFFER_CAPTURE].flush += tmp; + new_overflow = xe_guc_check_log_buf_overflow(&guc->log, GUC_LOG_BUFFER_CAPTURE, + full_count); + + /* Now copy the actual logs. 
+	if (unlikely(new_overflow)) {
+		/* copy the whole buffer in case of overflow */
+		read_offset = 0;
+		write_offset = buffer_size;
+	} else if (unlikely((read_offset > buffer_size) ||
+			    (write_offset > buffer_size))) {
+		xe_gt_err(guc_to_gt(guc),
+			  "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+			  read_offset, buffer_size);
+		/* copy whole buffer as offsets are unreliable */
+		read_offset = 0;
+		write_offset = buffer_size;
+	}
+
+	buf.size = buffer_size;
+	buf.rd = read_offset;
+	buf.wr = write_offset;
+	buf.data_offset = src_data_offset;
+
+	if (!xe_guc_read_stopped(guc)) {
+		do {
+			ret = guc_capture_extract_reglists(guc, &buf);
+			if (ret && ret != -ENODATA)
+				xe_gt_dbg(guc_to_gt(guc), "Capture extraction failed:%d\n", ret);
+		} while (ret >= 0);
+	}
+
+	/* Update the state of log buffer err-cap state */
+	xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+		  log_buf_state_offset + offsetof(struct guc_log_buffer_state, read_ptr), u32,
+		  write_offset);
+
+	/*
+	 * Clear the flush_to_file bit in the local copy first (the local copy
+	 * was loaded by the xe_map_memcpy_from() above), then write the
+	 * updated local copy back out through xe_map_wr().
+	 */
+	log_buf_state_local.flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
+	xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+		  log_buf_state_offset + offsetof(struct guc_log_buffer_state, flags), u32,
+		  log_buf_state_local.flags);
+	__guc_capture_flushlog_complete(guc);
+}
+
+/*
+ * xe_guc_capture_process - Process GuC register captured data
+ * @guc: The GuC object
+ *
+ * When GuC captured data is ready, GuC will send the
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host, and this
+ * function will be called to process the data that comes with the message.
+ *
+ * Returns: None
+ */
+void xe_guc_capture_process(struct xe_guc *guc)
+{
+	if (guc->capture)
+		__guc_capture_process_output(guc);
+}
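The window sanitization in __guc_capture_process_output() follows the usual pattern for consuming a shared ring whose offsets the host cannot trust; roughly (a hypothetical standalone helper, not part of the driver):

#include <stdint.h>

/* If the producer overflowed, or either offset is out of range,
 * fall back to copying the whole buffer instead of a window. */
static void sanitize_window(uint32_t size, int overflowed,
			    uint32_t *rd, uint32_t *wr)
{
	if (overflowed || *rd > size || *wr > size) {
		*rd = 0;
		*wr = size;
	}
}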
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct xe_guc *guc)
+{
+	struct drm_device *drm = guc_to_drm(guc);
+	struct __guc_capture_parsed_output *new;
+	int i;
+
+	new = drmm_kzalloc(drm, sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+		new->reginfo[i].regs = drmm_kzalloc(drm, guc->capture->max_mmio_per_node *
+						    sizeof(struct guc_mmio_reg), GFP_KERNEL);
+		if (!new->reginfo[i].regs) {
+			while (i)
+				drmm_kfree(drm, new->reginfo[--i].regs);
+			drmm_kfree(drm, new);
+			return NULL;
+		}
+	}
+	guc_capture_init_node(guc, new);
+
+	return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+	struct __guc_capture_parsed_output *node = NULL;
+	int i;
+
+	for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+		node = guc_capture_alloc_one_node(guc);
+		if (!node) {
+			xe_gt_warn(guc_to_gt(guc), "Register capture pre-alloc-cache failure\n");
+			/* don't free the priors, use what we got and clean up at shutdown */
+			return;
+		}
+		guc_capture_add_node_to_cachelist(guc->capture, node);
+	}
+}
+
+static int
+guc_get_max_reglist_count(struct xe_guc *guc)
+{
+	int i, j, k, tmp, maxregcount = 0;
+
+	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+		for (j = 0; j < GUC_STATE_CAPTURE_TYPE_MAX; ++j) {
+			for (k = 0; k < GUC_CAPTURE_LIST_CLASS_MAX; ++k) {
+				const struct __guc_mmio_reg_descr_group *match;
+
+				if (j == GUC_STATE_CAPTURE_TYPE_GLOBAL && k > 0)
+					continue;
+
+				tmp = 0;
+				match = guc_capture_get_one_list(guc->capture->reglists, i, j, k);
+				if (match)
+					tmp = match->num_regs;
+
+				match = guc_capture_get_one_list(guc->capture->extlists, i, j, k);
+				if (match)
+					tmp += match->num_regs;
+
+				if (tmp > maxregcount)
+					maxregcount = tmp;
+			}
+		}
+	}
+	if (!maxregcount)
+		maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+	return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+	/* skip if we've already done the pre-alloc */
+	if (guc->capture->max_mmio_per_node)
+		return;
+
+	guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+	__guc_capture_create_prealloc_nodes(guc);
+}
+
+static void
+read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_group *list,
+		 struct guc_mmio_reg *regs)
+{
+	int i;
+
+	if (!list || !list->list || list->num_regs == 0)
+		return;
+
+	if (!regs)
+		return;
+
+	for (i = 0; i < list->num_regs; i++) {
+		struct __guc_mmio_reg_descr desc = list->list[i];
+		u32 value;
+
+		if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+			value = xe_hw_engine_mmio_read32(hwe, desc.reg);
+		} else {
+			if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+			    FIELD_GET(GUC_REGSET_STEERING_NEEDED, desc.flags)) {
+				int group, instance;
+
+				group = FIELD_GET(GUC_REGSET_STEERING_GROUP, desc.flags);
+				instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, desc.flags);
+				value = xe_gt_mcr_unicast_read(hwe->gt, XE_REG_MCR(desc.reg.addr),
+							       group, instance);
+			} else {
+				value = xe_mmio_read32(&hwe->gt->mmio, desc.reg);
+			}
+		}
+
+		regs[i].value = value;
+		regs[i].offset = desc.reg.addr;
+		regs[i].flags = desc.flags;
+		regs[i].mask = desc.mask;
+	}
+}
+
+/**
+ * xe_engine_manual_capture - Take a manual snapshot of the engine registers.
+ * @hwe: Xe HW Engine.
+ * @snapshot: The engine snapshot
+ *
+ * Take an engine snapshot by reading the registers directly from the engine.
+ *
+ * Returns: None
+ */
+void
+xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot)
+{
+	struct xe_gt *gt = hwe->gt;
+	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_guc *guc = &gt->uc.guc;
+	struct xe_devcoredump *devcoredump = &xe->devcoredump;
+	enum guc_capture_list_class_type capture_class;
+	const struct __guc_mmio_reg_descr_group *list;
+	struct __guc_capture_parsed_output *new;
+	enum guc_state_capture_type type;
+	u16 guc_id = 0;
+	u32 lrca = 0;
+
+	if (IS_SRIOV_VF(xe))
+		return;
+
+	new = guc_capture_get_prealloc_node(guc);
+	if (!new)
+		return;
+
+	capture_class = xe_engine_class_to_guc_capture_class(hwe->class);
+	for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+		struct gcap_reg_list_info *reginfo = &new->reginfo[type];
+		/*
+		 * reginfo->regs is allocated based on guc->capture->max_mmio_per_node
+		 * which is based on the descriptor list driving the population so
+		 * should not overflow
+		 */
+
+		/* Get register list for the type/class */
+		list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+							capture_class, false);
+		if (!list) {
+			xe_gt_dbg(gt, "Empty GuC capture register descriptor for %s",
+				  hwe->name);
+			continue;
+		}
+
+		read_reg_to_node(hwe, list, reginfo->regs);
+		reginfo->num_regs = list->num_regs;
+
+		/* Capture steering registers for rcs/ccs */
+		if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+			list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+								type, capture_class, true);
+			if (list) {
+				read_reg_to_node(hwe, list, &reginfo->regs[reginfo->num_regs]);
+				reginfo->num_regs += list->num_regs;
+			}
+		}
+	}
+
+	if (devcoredump && devcoredump->captured) {
+		struct xe_guc_submit_exec_queue_snapshot *ge = devcoredump->snapshot.ge;
+
+		if (ge) {
+			guc_id = ge->guc.id;
+			if (ge->lrc[0])
+				lrca = ge->lrc[0]->context_desc;
+		}
+	}
+
+	new->eng_class = xe_engine_class_to_guc_class(hwe->class);
+	new->eng_inst = hwe->instance;
+	new->guc_id = guc_id;
+	new->lrca = lrca;
+	new->is_partial = 0;
+	new->locked = 1;
+	new->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
+
+	guc_capture_add_node_to_outlist(guc->capture, new);
+	devcoredump->snapshot.matched_node = new;
+}
+
+static struct guc_mmio_reg *
+guc_capture_find_reg(struct gcap_reg_list_info *reginfo, u32 addr, u32 flags)
+{
+	int i;
+
+	if (reginfo && reginfo->num_regs > 0) {
+		struct guc_mmio_reg *regs = reginfo->regs;
+
+		if (regs)
+			for (i = 0; i < reginfo->num_regs; i++)
+				if (regs[i].offset == addr && regs[i].flags == flags)
+					return &regs[i];
+	}
+
+	return NULL;
+}
+
+static void
+snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p,
+			     u32 type, const struct __guc_mmio_reg_descr_group *list)
+{
+	struct xe_gt *gt = snapshot->hwe->gt;
+	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_guc *guc = &gt->uc.guc;
+	struct xe_devcoredump *devcoredump = &xe->devcoredump;
+	struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
+	struct gcap_reg_list_info *reginfo = NULL;
+	u32 last_value, i;
+	bool is_ext;
+
+	if (!list || list->num_regs == 0)
+		return;
+	XE_WARN_ON(!devcore_snapshot->matched_node);
+
+	is_ext = list == guc->capture->extlists;
+	reginfo = &devcore_snapshot->matched_node->reginfo[type];
+
+	/*
+	 * Loop through the descriptor list first and find each register in
+	 * the node. This is more scalable for developer maintenance, as it
+	 * ensures the printout matches the ordering of the static descriptor
+	 * table-of-lists.
+	 */
+	for (i = 0; i < list->num_regs; i++) {
+		const struct __guc_mmio_reg_descr *reg_desc = &list->list[i];
+		struct guc_mmio_reg *reg;
+		u32 value;
+
+		reg = guc_capture_find_reg(reginfo, reg_desc->reg.addr, reg_desc->flags);
+		if (!reg)
+			continue;
+
+		value = reg->value;
+		if (reg_desc->data_type == REG_64BIT_LOW_DW) {
+			last_value = value;
+			/* Low 32 bit dword saved, continue for high 32 bit */
+			continue;
+		} else if (reg_desc->data_type == REG_64BIT_HI_DW) {
+			u64 value_qw = ((u64)value << 32) | last_value;
+
+			drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw);
+			continue;
+		}
+
+		if (is_ext) {
+			int dss, group, instance;
+
+			group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
+			instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
+			dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
+
+			drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
+		} else {
+			drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
+		}
+	}
+}
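The REG_64BIT_LOW_DW/REG_64BIT_HI_DW branch above relies on the descriptor table listing the low dword immediately before its high dword; the reassembly itself is just a shift and OR:

#include <stdint.h>

/* Combine two captured 32-bit halves into one 64-bit register value. */
static uint64_t make_reg_qw(uint32_t low_dw, uint32_t hi_dw)
{
	return ((uint64_t)hi_dw << 32) | low_dw;
}

If the table ever ordered the halves differently, last_value would pair the wrong dwords, so the ordering is effectively part of the table's contract.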
+
+/**
+ * xe_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
+ * @snapshot: Xe HW Engine snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given Xe HW Engine snapshot object.
+ */
+void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
+{
+	const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+		"full-capture",
+		"partial-capture"
+	};
+	int type;
+	const struct __guc_mmio_reg_descr_group *list;
+	enum guc_capture_list_class_type capture_class;
+
+	struct xe_gt *gt;
+	struct xe_device *xe;
+	struct xe_devcoredump *devcoredump;
+	struct xe_devcoredump_snapshot *devcore_snapshot;
+
+	if (!snapshot)
+		return;
+
+	gt = snapshot->hwe->gt;
+	xe = gt_to_xe(gt);
+	devcoredump = &xe->devcoredump;
+	devcore_snapshot = &devcoredump->snapshot;
+
+	if (!devcore_snapshot->matched_node)
+		return;
+
+	xe_gt_assert(gt, snapshot->source <= XE_ENGINE_CAPTURE_SOURCE_GUC);
+	xe_gt_assert(gt, snapshot->hwe);
+
+	capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class);
+
+	drm_printf(p, "%s (physical), logical instance=%d\n",
+		   snapshot->name ? snapshot->name : "",
+		   snapshot->logical_instance);
+	drm_printf(p, "\tCapture_source: %s\n",
+		   snapshot->source == XE_ENGINE_CAPTURE_SOURCE_GUC ? "GuC" : "Manual");
+	drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]);
+	drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
+		   snapshot->forcewake.domain, snapshot->forcewake.ref);
+	drm_printf(p, "\tReserved: %s\n",
+		   str_yes_no(snapshot->kernel_reserved));
+
+	for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+		list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+							capture_class, false);
+		snapshot_print_by_list_order(snapshot, p, type, list);
+	}
+
+	if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+		list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+							GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+							capture_class, true);
+		snapshot_print_by_list_order(snapshot, p, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+					     list);
+	}
+
+	drm_puts(p, "\n");
+}
+
+/**
+ * xe_guc_capture_get_matching_and_lock - Find the matching GuC capture node for the job.
+ * @job: The job object.
+ *
+ * Search the capture outlist for a node matching the job; this can also be
+ * used to check whether a GuC capture is ready for the job.
+ * If found, the node's locked flag is set.
+ *
+ * Returns: the matching guc-capture node ptr, else NULL
+ */
+struct __guc_capture_parsed_output *
+xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job)
+{
+	struct xe_hw_engine *hwe;
+	enum xe_hw_engine_id id;
+	struct xe_exec_queue *q;
+	struct xe_device *xe;
+	u16 guc_class = GUC_LAST_ENGINE_CLASS + 1;
+	struct xe_devcoredump_snapshot *ss;
+
+	if (!job)
+		return NULL;
+
+	q = job->q;
+	if (!q || !q->gt)
+		return NULL;
+
+	xe = gt_to_xe(q->gt);
+	if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe))
+		return NULL;
+
+	ss = &xe->devcoredump.snapshot;
+	if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
+		return ss->matched_node;
+
+	/* Find hwe for the job */
+	for_each_hw_engine(hwe, q->gt, id) {
+		if (hwe != q->hwe)
+			continue;
+		guc_class = xe_engine_class_to_guc_class(hwe->class);
+		break;
+	}
+
+	if (guc_class <= GUC_LAST_ENGINE_CLASS) {
+		struct __guc_capture_parsed_output *n, *ntmp;
+		struct xe_guc *guc = &q->gt->uc.guc;
+		u16 guc_id = q->guc->id;
+		u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]);
+
+		/*
+		 * Look for a matching GuC reported error capture node from
+		 * the internal output link-list based on engine, guc id and
+		 * lrca info.
+		 */
+		list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+			if (n->eng_class == guc_class && n->eng_inst == hwe->instance &&
+			    n->guc_id == guc_id && n->lrca == lrca &&
+			    n->source == XE_ENGINE_CAPTURE_SOURCE_GUC) {
+				n->locked = 1;
+				return n;
+			}
+		}
+	}
+	return NULL;
+}
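The outlist match above keys on four identifiers; conceptually it is an equality test over a small tuple (hypothetical types, for illustration only):

#include <stdint.h>

struct cap_key {
	uint16_t eng_class, eng_inst, guc_id;
	uint32_t lrca;
};

/* A capture node matches a job when all four identifiers agree. */
static int cap_key_eq(const struct cap_key *a, const struct cap_key *b)
{
	return a->eng_class == b->eng_class && a->eng_inst == b->eng_inst &&
	       a->guc_id == b->guc_id && a->lrca == b->lrca;
}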
+
+/**
+ * xe_engine_snapshot_capture_for_job - Take snapshot of associated engine
+ * @job: The job object
+ *
+ * Take a snapshot of the HW engine associated with the job.
+ *
+ * Returns: None.
+ */
+void
+xe_engine_snapshot_capture_for_job(struct xe_sched_job *job)
+{
+	struct xe_exec_queue *q = job->q;
+	struct xe_device *xe = gt_to_xe(q->gt);
+	struct xe_devcoredump *coredump = &xe->devcoredump;
+	struct xe_hw_engine *hwe;
+	enum xe_hw_engine_id id;
+	u32 adj_logical_mask = q->logical_mask;
+
+	if (IS_SRIOV_VF(xe))
+		return;
+
+	for_each_hw_engine(hwe, q->gt, id) {
+		if (hwe->class != q->hwe->class ||
+		    !(BIT(hwe->logical_instance) & adj_logical_mask)) {
+			coredump->snapshot.hwe[id] = NULL;
+			continue;
+		}
+
+		if (!coredump->snapshot.hwe[id]) {
+			coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe, job);
+		} else {
+			struct __guc_capture_parsed_output *new;
+
+			new = xe_guc_capture_get_matching_and_lock(job);
+			if (new) {
+				struct xe_guc *guc = &q->gt->uc.guc;
+
+				/*
+				 * If we are in here, it means we found a fresh
+				 * GuC-err-capture node for this engine after
+				 * previously failing to find a match in the
+				 * early part of guc_exec_queue_timedout_job.
+				 * Thus we must free the manually captured node
+				 */
+				guc_capture_free_outlist_node(guc->capture,
+							      coredump->snapshot.matched_node);
+				coredump->snapshot.matched_node = new;
+			}
+		}
+
+		break;
+	}
+}
+
+/*
+ * xe_guc_capture_put_matched_nodes - Clean up matched nodes
+ * @guc: The GuC object
+ *
+ * Free the matched node and all nodes with an equal guc_id from the
+ * GuC capture outlist
+ */
+void xe_guc_capture_put_matched_nodes(struct xe_guc *guc)
+{
+	struct xe_device *xe = guc_to_xe(guc);
+	struct xe_devcoredump *devcoredump = &xe->devcoredump;
+	struct __guc_capture_parsed_output *n = devcoredump->snapshot.matched_node;
+
+	if (n) {
+		guc_capture_remove_stale_matches_from_list(guc->capture, n);
+		guc_capture_free_outlist_node(guc->capture, n);
+		devcoredump->snapshot.matched_node = NULL;
+	}
+}
+
+/*
+ * xe_guc_capture_steered_list_init - Init steering register list
+ * @guc: The GuC object
+ *
+ * Init the steering register list for GuC register capture and create the
+ * pre-alloc nodes
+ */
+void xe_guc_capture_steered_list_init(struct xe_guc *guc)
+{
+	/*
+	 * For certain engine classes, there are slice and subslice
+	 * level registers requiring steering. We allocate and populate
+	 * these based on hw config and add it as an extension list at
+	 * the end of the pre-populated render list.
+	 */
+	guc_capture_alloc_steered_lists(guc);
+	check_guc_capture_size(guc);
+	guc_capture_create_prealloc_nodes(guc);
+}
+
+/*
+ * xe_guc_capture_init - Init for GuC register capture
+ * @guc: The GuC object
+ *
+ * Init for GuC register capture, alloc memory for capture data structure.
+ *
+ * Returns: 0 if success.
+ * -ENOMEM if out of memory + */ +int xe_guc_capture_init(struct xe_guc *guc) +{ + guc->capture = drmm_kzalloc(guc_to_drm(guc), sizeof(*guc->capture), GFP_KERNEL); + if (!guc->capture) + return -ENOMEM; + + guc->capture->reglists = guc_capture_get_device_reglist(guc_to_xe(guc)); + + INIT_LIST_HEAD(&guc->capture->outlist); + INIT_LIST_HEAD(&guc->capture->cachelist); + + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h new file mode 100644 index 000000000000..97a795d13dd1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_capture.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2024 Intel Corporation + */ + +#ifndef _XE_GUC_CAPTURE_H +#define _XE_GUC_CAPTURE_H + +#include <linux/types.h> +#include "abi/guc_capture_abi.h" +#include "xe_guc.h" +#include "xe_guc_fwif.h" + +struct xe_guc; +struct xe_hw_engine; +struct xe_hw_engine_snapshot; +struct xe_sched_job; + +static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class) +{ + switch (class) { + case GUC_RENDER_CLASS: + case GUC_COMPUTE_CLASS: + return GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE; + case GUC_GSC_OTHER_CLASS: + return GUC_CAPTURE_LIST_CLASS_GSC_OTHER; + case GUC_VIDEO_CLASS: + case GUC_VIDEOENHANCE_CLASS: + case GUC_BLITTER_CLASS: + return class; + default: + XE_WARN_ON(class); + return GUC_CAPTURE_LIST_CLASS_MAX; + } +} + +static inline enum guc_capture_list_class_type +xe_engine_class_to_guc_capture_class(enum xe_engine_class class) +{ + return xe_guc_class_to_capture_class(xe_engine_class_to_guc_class(class)); +} + +void xe_guc_capture_process(struct xe_guc *guc); +int xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, void **outptr); +int xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, size_t *size); +int xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size); +size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc); +const struct __guc_mmio_reg_descr_group * +xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type, + enum guc_capture_list_class_type capture_class, bool is_ext); +struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_sched_job *job); +void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot); +void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p); +void xe_engine_snapshot_capture_for_job(struct xe_sched_job *job); +void xe_guc_capture_steered_list_init(struct xe_guc *guc); +void xe_guc_capture_put_matched_nodes(struct xe_guc *guc); +int xe_guc_capture_init(struct xe_guc *guc); + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h new file mode 100644 index 000000000000..2057125b1bfa --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021-2024 Intel Corporation + */ + +#ifndef _XE_GUC_CAPTURE_TYPES_H +#define _XE_GUC_CAPTURE_TYPES_H + +#include <linux/types.h> +#include "regs/xe_reg_defs.h" + +struct xe_guc; + +/* data type of the register in register list */ +enum capture_register_data_type { + REG_32BIT = 0, + REG_64BIT_LOW_DW, + REG_64BIT_HI_DW, +}; + +/** + * struct __guc_mmio_reg_descr - GuC mmio register descriptor + * + * xe_guc_capture module uses these structures to define a register + * 
(offsets, names, flags,...) that are used at the ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+	/** @reg: the register */
+	struct xe_reg reg;
+	/**
+	 * @data_type: data type of the register
+	 * Could be 32 bit, low or hi dword of a 64 bit, see enum
+	 * capture_register_data_type
+	 */
+	enum capture_register_data_type data_type;
+	/** @flags: Flags for the register */
+	u32 flags;
+	/** @mask: The mask to apply */
+	u32 mask;
+	/** @regname: Name of the register */
+	const char *regname;
+};
+
+/**
+ * struct __guc_mmio_reg_descr_group - A group of register descriptors
+ *
+ * xe_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags,...) that are used at the ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr_group {
+	/** @list: The register list */
+	const struct __guc_mmio_reg_descr *list;
+	/** @num_regs: Count of registers in the list */
+	u32 num_regs;
+	/** @owner: PF/VF owner, see enum guc_capture_list_index_type */
+	u32 owner;
+	/** @type: Capture register type, see enum guc_state_capture_type */
+	u32 type;
+	/** @engine: The engine class, see enum guc_capture_list_class_type */
+	u32 engine;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 9c505d3517cd..8aeb1789805c 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
+#include <linux/fault-inject.h>
 
 #include <kunit/static_stub.h>
 
@@ -17,6 +18,7 @@
 #include "abi/guc_actions_sriov_abi.h"
 #include "abi/guc_klvs_abi.h"
 #include "xe_bo.h"
+#include "xe_devcoredump.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_pagefault.h"
@@ -25,12 +27,48 @@
 #include "xe_gt_sriov_pf_monitor.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_guc.h"
+#include "xe_guc_log.h"
 #include "xe_guc_relay.h"
 #include "xe_guc_submit.h"
 #include "xe_map.h"
 #include "xe_pm.h"
 #include "xe_trace_guc.h"
 
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+enum {
+	/* Internal states, not error conditions */
+	CT_DEAD_STATE_REARM,			/* 0x0001 */
+	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
+
+	/* Error conditions */
+	CT_DEAD_SETUP,				/* 0x0004 */
+	CT_DEAD_H2G_WRITE,			/* 0x0008 */
+	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
+	CT_DEAD_G2H_READ,			/* 0x0020 */
+	CT_DEAD_G2H_RECV,			/* 0x0040 */
+	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
+	CT_DEAD_DEADLOCK,			/* 0x0100 */
+	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
+	CT_DEAD_FAST_G2H,			/* 0x0400 */
+	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
+	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
+	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
+	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
+};
+
+static void ct_dead_worker_func(struct work_struct *w);
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+#define CT_DEAD(ct, ctb, reason_code)	ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+#else
+#define CT_DEAD(ct, ctb, reason)			\
+	do {						\
+		struct guc_ctb *_ctb = (ctb);		\
+		if (_ctb)				\
+			_ctb->info.broken = true;	\
+	} while (0)
+#endif
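The CT_DEAD reason codes above are bit positions, so several causes can accumulate in one mask; the hex values in the comments are simply 1 << code. A small standalone sketch of decoding such a mask (abbreviated name table, illustration only):

#include <stdio.h>

static void print_dead_reasons(unsigned int reason)
{
	static const char *names[] = {
		"STATE_REARM", "STATE_CAPTURE", "SETUP", "H2G_WRITE",
	};
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (reason & (1u << i))
			printf("CT dead reason: CT_DEAD_%s\n", names[i]);
}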
+
 /* Used when a CT send wants to block and / or receive data */
 struct g2h_fence {
 	u32 *response_buffer;
@@ -175,14 +213,18 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
 
-	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
 	if (!ct->g2h_wq)
 		return -ENOMEM;
 
 	spin_lock_init(&ct->fast_lock);
 	xa_init(&ct->fence_lookup);
 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
-	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	spin_lock_init(&ct->dead.lock);
+	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#endif
 	init_waitqueue_head(&ct->wq);
 	init_waitqueue_head(&ct->g2h_fence_wq);
@@ -209,6 +251,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	ct->state = XE_GUC_CT_STATE_DISABLED;
 	return 0;
 }
+ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
 
 #define desc_read(xe_, guc_ctb__, field_)			\
 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
@@ -395,6 +438,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
 
 	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
 
+	xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
 	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
 	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
 
@@ -419,10 +463,22 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
 	if (ct_needs_safe_mode(ct))
 		ct_enter_safe_mode(ct);
 
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	/*
+	 * The CT has now been reset so the dumper can be re-armed
+	 * after any existing dead state has been dumped.
+	 */
+	spin_lock_irq(&ct->dead.lock);
+	if (ct->dead.reason)
+		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+	spin_unlock_irq(&ct->dead.lock);
+#endif
+
 	return 0;
 
 err_out:
 	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+	CT_DEAD(ct, NULL, SETUP);
 
 	return err;
 }
@@ -466,6 +522,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
 
 	if (cmd_len > h2g->info.space) {
 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+		if (h2g->info.head > h2g->info.size) {
+			struct xe_device *xe = ct_to_xe(ct);
+			u32 desc_status = desc_read(xe, h2g, status);
+
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
+				  h2g->info.head, h2g->info.size);
+			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+			return false;
+		}
+
 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
 					     h2g->info.size) -
 				  h2g->info.resv_space;
@@ -521,10 +590,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
 
 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
 {
+	bool bad = false;
+
 	lockdep_assert_held(&ct->fast_lock);
-	xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
-		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
-	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
+
+	bad = ct->ctbs.g2h.info.space + g2h_len >
+	      ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+	bad |= !ct->g2h_outstanding;
+
+	if (bad) {
+		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+			  ct->ctbs.g2h.info.space, g2h_len,
+			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+			  ct->ctbs.g2h.info.space + g2h_len,
+			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+			  ct->g2h_outstanding);
+		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+		return;
+	}
 
 	ct->ctbs.g2h.info.space += g2h_len;
 	if (!--ct->g2h_outstanding)
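The release-side check above replaces two asserts with a recoverable report; the invariant itself is simple credit accounting, roughly (a hypothetical standalone model, not driver code):

struct g2h_credits {
	unsigned int space, size, resv_space, outstanding;
};

/* Returning 'len' credits must match an outstanding request and must
 * never push free space past the usable size (total minus reserved). */
static int g2h_release_ok(const struct g2h_credits *g, unsigned int len)
{
	return g->outstanding > 0 &&
	       g->space + len <= g->size - g->resv_space;
}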
@@ -551,12 +634,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	u32 full_len;
 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
							 tail * sizeof(u32));
+	u32 desc_status;
 
 	full_len = len + GUC_CTB_HDR_LEN;
 
 	lockdep_assert_held(&ct->lock);
 	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
-	xe_gt_assert(gt, tail <= h2g->info.size);
+
+	desc_status = desc_read(xe, h2g, status);
+	if (desc_status) {
+		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+		goto corrupted;
+	}
+
+	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+		u32 desc_tail = desc_read(xe, h2g, tail);
+		u32 desc_head = desc_read(xe, h2g, head);
+
+		if (tail != desc_tail) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+			goto corrupted;
+		}
+
+		if (tail > h2g->info.size) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+				  tail, h2g->info.size);
+			goto corrupted;
+		}
+
+		if (desc_head >= h2g->info.size) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
+				  desc_head, h2g->info.size);
+			goto corrupted;
+		}
+	}
 
 	/* Command will wrap, zero fill (NOPs), return and check credits again */
 	if (tail + full_len > h2g->info.size) {
@@ -609,6 +723,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		desc_read(xe, h2g, head), h2g->info.tail);
 
 	return 0;
+
+corrupted:
+	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+	return -EPIPE;
 }
 
 /*
@@ -716,7 +834,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
 {
 	struct xe_device *xe = ct_to_xe(ct);
 	struct xe_gt *gt = ct_to_gt(ct);
-	struct drm_printer p = xe_gt_info_printer(gt);
 	unsigned int sleep_period_ms = 1;
 	int ret;
 
@@ -769,8 +886,13 @@ try_again:
 			goto broken;
 #undef g2h_avail
 
-		if (dequeue_one_g2h(ct) < 0)
+		ret = dequeue_one_g2h(ct);
+		if (ret < 0) {
+			if (ret != -ECANCELED)
+				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
					  ERR_PTR(ret));
 			goto broken;
+		}
 
 		goto try_again;
 	}
@@ -779,8 +901,7 @@ try_again:
 
 broken:
 	xe_gt_err(gt, "No forward process on H2G, reset required\n");
-	xe_guc_ct_print(ct, &p, true);
-	ct->ctbs.h2g.info.broken = true;
+	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
 
 	return -EDEADLK;
 }
@@ -848,7 +969,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
 #define ct_alive(ct)	\
 	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
-	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
 		return false;
 #undef ct_alive
 
@@ -890,7 +1011,7 @@ retry_same_fence:
 			goto retry_same_fence;
 
 		if (!g2h_fence_needs_alloc(&g2h_fence))
-			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
 
 		return ret;
 	}
@@ -916,7 +1037,7 @@ retry_same_fence:
 	if (!ret) {
 		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
 			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
-		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
 		mutex_unlock(&ct->lock);
 
 		return -ETIME;
	}
@@ -1028,6 +1149,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		else
 			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
				  type, fence);
+		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
 
 		return -EPROTO;
	}
@@ -1035,6 +1157,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
 	if (unlikely(!g2h_fence)) {
 		/* Don't tear down channel, as send could've timed out */
+		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
 		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
 		return 0;
@@ -1079,7 +1202,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
 		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
			  origin);
-		ct->ctbs.g2h.info.broken = true;
+		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
 
 		return -EPROTO;
	}
@@ -1097,7 +1220,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 	default:
 		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
			  type);
-		ct->ctbs.g2h.info.broken = true;
+		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
 
 		ret = -EOPNOTSUPP;
	}
@@ -1140,6 +1263,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		/* Selftest only at the moment */
 		break;
 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+		break;
 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
 		/* FIXME: Handle this */
 		break;
@@ -1174,9 +1299,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
	}
 
-	if (ret)
+	if (ret) {
 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
+		CT_DEAD(ct, NULL, PROCESS_FAILED);
+	}
 
 	return 0;
 }
@@ -1186,7 +1313,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 	struct xe_device *xe = ct_to_xe(ct);
 	struct xe_gt *gt = ct_to_gt(ct);
 	struct guc_ctb *g2h = &ct->ctbs.g2h;
-	u32 tail, head, len;
+	u32 tail, head, len, desc_status;
 	s32 avail;
 	u32 action;
 	u32 *hxg;
@@ -1205,6 +1332,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 
 	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
 
+	desc_status = desc_read(xe, g2h, status);
+	if (desc_status) {
+		if (desc_status & GUC_CTB_STATUS_DISABLED) {
+			/*
+			 * Potentially valid if a CLIENT_RESET request resulted in
+			 * contexts/engines being reset. But should never happen as
+			 * no contexts should be active when CLIENT_RESET is sent.
+			 */
+			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+			desc_status &= ~GUC_CTB_STATUS_DISABLED;
+		}
+
+		if (desc_status) {
+			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+			goto corrupted;
+		}
+	}
+
+	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+		u32 desc_tail = desc_read(xe, g2h, tail);
+		/*
+		u32 desc_head = desc_read(xe, g2h, head);
+
+		 * info.head and desc_head are updated back-to-back at the end of
+		 * this function and nowhere else. Hence, they cannot be different
+		 * unless two g2h_read calls are running concurrently. Which is not
+		 * possible because it is guarded by ct->fast_lock. And yet, some
+		 * discrete platforms are regularly hitting this error :(.
+		 *
+		 * desc_head rolling backwards shouldn't cause any noticeable
+		 * problems - just a delay in GuC being allowed to proceed past that
+		 * point in the queue. So for now, just disable the error until it
+		 * can be root caused.
+		 *
+		if (g2h->info.head != desc_head) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+				  desc_head, g2h->info.head);
+			goto corrupted;
+		}
+		*/
+
+		if (g2h->info.head > g2h->info.size) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+				  g2h->info.head, g2h->info.size);
+			goto corrupted;
+		}
+
+		if (desc_tail >= g2h->info.size) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
+				  desc_tail, g2h->info.size);
+			goto corrupted;
+		}
+	}
+
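After those sanity checks, the read side computes how many dwords are available in the circular buffer; the wrap handling that follows is the usual signed-difference idiom:

/* Available dwords between head and tail in a ring of 'size' dwords. */
static int circ_avail(unsigned int head, unsigned int tail, unsigned int size)
{
	int avail = (int)tail - (int)head;

	return avail < 0 ? avail + (int)size : avail;
}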
 	/* Calculate DW available to read */
 	tail = desc_read(xe, g2h, tail);
 	avail = tail - g2h->info.head;
@@ -1221,9 +1405,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 	if (len > avail) {
 		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
			  avail, len);
-		g2h->info.broken = true;
-
-		return -EPROTO;
+		goto corrupted;
 	}
 
 	head = (g2h->info.head + 1) % g2h->info.size;
@@ -1269,6 +1451,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 		action, len, g2h->info.head, tail);
 
 	return len;
+
+corrupted:
+	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+	return -EPROTO;
 }
 
 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
@@ -1295,9 +1481,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		xe_gt_warn(gt, "NOT_POSSIBLE");
 	}
 
-	if (ret)
+	if (ret) {
 		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
			  action, ERR_PTR(ret));
+		CT_DEAD(ct, NULL, FAST_G2H);
+	}
 }
 
 /**
@@ -1357,7 +1545,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
 
 static void receive_g2h(struct xe_guc_ct *ct)
 {
-	struct xe_gt *gt = ct_to_gt(ct);
 	bool ongoing;
 	int ret;
 
@@ -1394,9 +1581,8 @@ static void receive_g2h(struct xe_guc_ct *ct)
 		mutex_unlock(&ct->lock);
 
 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
-			struct drm_printer p = xe_gt_info_printer(gt);
-
-			xe_guc_ct_print(ct, &p, false);
+			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
+			CT_DEAD(ct, NULL, G2H_RECV);
 			kick_reset(ct);
 		}
 	} while (ret == 1);
@@ -1412,49 +1598,34 @@ static void g2h_worker_func(struct work_struct *w)
 	receive_g2h(ct);
 }
 
-static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
-				     struct guc_ctb_snapshot *snapshot,
-				     bool atomic)
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+							bool want_ctb)
 {
-	u32 head, tail;
-
-	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
-			   sizeof(struct guc_ct_buffer_desc));
-	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+	struct xe_guc_ct_snapshot *snapshot;
 
-	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
-				       atomic ? GFP_ATOMIC : GFP_KERNEL);
+	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!snapshot)
+		return NULL;
 
-	if (!snapshot->cmds) {
-		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
-		return;
+	if (ct->bo && want_ctb) {
+		snapshot->ctb_size = ct->bo->size;
+		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ?
GFP_ATOMIC : GFP_KERNEL); } - head = snapshot->desc.head; - tail = snapshot->desc.tail; - - if (head != tail) { - struct iosys_map map = - IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32)); - - while (head != tail) { - snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32); - ++head; - if (head == ctb->info.size) { - head = 0; - map = ctb->cmds; - } else { - iosys_map_incr(&map, sizeof(u32)); - } - } - } + return snapshot; +} + +static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, + struct guc_ctb_snapshot *snapshot) +{ + xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0, + sizeof(struct guc_ct_buffer_desc)); + memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info)); } static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, struct drm_printer *p) { - u32 head, tail; - drm_printf(p, "\tsize: %d\n", snapshot->info.size); drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space); drm_printf(p, "\thead: %d\n", snapshot->info.head); @@ -1464,63 +1635,46 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot, drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head); drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail); drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status); +} - if (!snapshot->cmds) - return; +static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic, + bool want_ctb) +{ + struct xe_device *xe = ct_to_xe(ct); + struct xe_guc_ct_snapshot *snapshot; - head = snapshot->desc.head; - tail = snapshot->desc.tail; + snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb); + if (!snapshot) { + xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); + return NULL; + } - while (head != tail) { - drm_printf(p, "\tcmd[%d]: 0x%08x\n", head, - snapshot->cmds[head]); - ++head; - if (head == snapshot->info.size) - head = 0; + if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { + snapshot->ct_enabled = true; + snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); + guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g); + guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h); } -} -static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot) -{ - kfree(snapshot->cmds); + if (ct->bo && snapshot->ctb) + xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size); + + return snapshot; } /** * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state. * @ct: GuC CT object. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. * * This can be printed out in a later stage like during dev_coredump - * analysis. + * analysis. This is safe to be called during atomic context. * * Returns: a GuC CT snapshot object that must be freed by the caller * by using `xe_guc_ct_snapshot_free`. */ -struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, - bool atomic) +struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct) { - struct xe_device *xe = ct_to_xe(ct); - struct xe_guc_ct_snapshot *snapshot; - - snapshot = kzalloc(sizeof(*snapshot), - atomic ? 
GFP_ATOMIC : GFP_KERNEL); - - if (!snapshot) { - drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n"); - return NULL; - } - - if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) { - snapshot->ct_enabled = true; - snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding); - guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, - &snapshot->h2g, atomic); - guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, - &snapshot->g2h, atomic); - } - - return snapshot; + return guc_ct_snapshot_capture(ct, true, true); } /** @@ -1540,11 +1694,13 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, drm_puts(p, "H2G CTB (all sizes in DW):\n"); guc_ctb_snapshot_print(&snapshot->h2g, p); - drm_puts(p, "\nG2H CTB (all sizes in DW):\n"); + drm_puts(p, "G2H CTB (all sizes in DW):\n"); guc_ctb_snapshot_print(&snapshot->g2h, p); - drm_printf(p, "\tg2h outstanding: %d\n", snapshot->g2h_outstanding); + + if (snapshot->ctb) + xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size); } else { drm_puts(p, "CT disabled\n"); } @@ -1562,8 +1718,7 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) if (!snapshot) return; - guc_ctb_snapshot_free(&snapshot->h2g); - guc_ctb_snapshot_free(&snapshot->g2h); + kfree(snapshot->ctb); kfree(snapshot); } @@ -1571,16 +1726,121 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) * xe_guc_ct_print - GuC CT Print. * @ct: GuC CT. * @p: drm_printer where it will be printed out. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. + * @want_ctb: Should the full CTB content be dumped (vs just the headers) * - * This function quickly capture a snapshot and immediately print it out. + * This function will quickly capture a snapshot of the CT state + * and immediately print it out. 
+ */
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
 {
 	struct xe_guc_ct_snapshot *snapshot;
 
-	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
 	xe_guc_ct_snapshot_print(snapshot, p);
 	xe_guc_ct_snapshot_free(snapshot);
 }
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+{
+	struct xe_guc_log_snapshot *snapshot_log;
+	struct xe_guc_ct_snapshot *snapshot_ct;
+	struct xe_guc *guc = ct_to_guc(ct);
+	unsigned long flags;
+	bool have_capture;
+
+	if (ctb)
+		ctb->info.broken = true;
+
+	/* Ignore further errors after the first dump until a reset */
+	if (ct->dead.reported)
+		return;
+
+	spin_lock_irqsave(&ct->dead.lock, flags);
+
+	/* And only capture one dump at a time */
+	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+	ct->dead.reason |= (1 << reason_code) |
+			   (1 << CT_DEAD_STATE_CAPTURE);
+
+	spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+	if (have_capture)
+		return;
+
+	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+	snapshot_ct = xe_guc_ct_snapshot_capture(ct);
+
+	spin_lock_irqsave(&ct->dead.lock, flags);
+
+	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+		xe_guc_log_snapshot_free(snapshot_log);
+		xe_guc_ct_snapshot_free(snapshot_ct);
+	} else {
+		ct->dead.snapshot_log = snapshot_log;
+		ct->dead.snapshot_ct = snapshot_ct;
+	}
+
+	spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+	queue_work(system_unbound_wq, &ct->dead.worker);
+}
+
+static void ct_dead_print(struct xe_dead_ct *dead)
+{
+	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
+	static int g_count;
+	struct drm_printer ip = xe_gt_info_printer(gt);
+	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+	if (!dead->reason) {
+		xe_gt_err(gt, "CTB is dead for no reason!?\n");
+		return;
+	}
+
+	drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
+
+	/* Can't generate a genuine core dump at this point, so just do the good bits */
+	drm_puts(&lp, "**** Xe Device Coredump ****\n");
+	xe_device_snapshot_print(xe, &lp);
+
+	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+	drm_puts(&lp, "**** GuC Log ****\n");
+	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+	drm_puts(&lp, "**** GuC CT ****\n");
+	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+	drm_puts(&lp, "Done.\n");
+}
+
+static void ct_dead_worker_func(struct work_struct *w)
+{
+	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+	if (!ct->dead.reported) {
+		ct->dead.reported = true;
+		ct_dead_print(&ct->dead);
+	}
+
+	spin_lock_irq(&ct->dead.lock);
+
+	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+	ct->dead.snapshot_log = NULL;
+	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+	ct->dead.snapshot_ct = NULL;
+
+	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+		/* A reset has occurred so re-arm the error reporting */
+		ct->dead.reason = 0;
+		ct->dead.reported = false;
+	}
+
+	spin_unlock_irq(&ct->dead.lock);
+}
+#endif
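The dead-CT machinery is deliberately one-shot: 'reported' blocks further dumps until a reset sets the REARM bit, which the worker then consumes to clear the state. A stripped-down model of that handshake (standalone C; field names mirror the driver's, the locking is omitted):

#include <stdbool.h>

#define REARM_BIT (1u << 0)	/* stands in for CT_DEAD_STATE_REARM */

struct dead_state { unsigned int reason; bool reported; };

static void on_ct_reenable(struct dead_state *d)
{
	if (d->reason)		/* only rearm if an error was recorded */
		d->reason |= REARM_BIT;
}

static void on_dump_done(struct dead_state *d)
{
	if (d->reason & REARM_BIT) {
		d->reason = 0;
		d->reported = false;	/* future errors will dump again */
	}
}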
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 190202fce2d0..82c4ae458dda 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -9,6 +9,7 @@
 #include "xe_guc_ct_types.h"
 
 struct drm_printer;
+struct xe_device;
 
 int xe_guc_ct_init(struct xe_guc_ct *ct);
 int xe_guc_ct_enable(struct xe_guc_ct *ct);
@@ -16,12 +17,10 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct);
 void xe_guc_ct_stop(struct xe_guc_ct *ct);
 void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
 
-struct xe_guc_ct_snapshot *
-xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
-void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
-			      struct drm_printer *p);
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p);
 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
 
 static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
 {
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 761cb9031298..8e1b9d981d61 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -52,8 +52,6 @@ struct guc_ctb {
 struct guc_ctb_snapshot {
 	/** @desc: snapshot of the CTB descriptor */
 	struct guc_ct_buffer_desc desc;
-	/** @cmds: snapshot of the CTB commands */
-	u32 *cmds;
 	/** @info: snapshot of the CTB info */
 	struct guc_ctb_info info;
 };
@@ -70,6 +68,10 @@ struct xe_guc_ct_snapshot {
 	struct guc_ctb_snapshot g2h;
 	/** @h2g: H2G CTB snapshot */
 	struct guc_ctb_snapshot h2g;
+	/** @ctb_size: size of the snapshot of the CTB */
+	size_t ctb_size;
+	/** @ctb: snapshot of the entire CTB */
+	u32 *ctb;
 };
 
 /**
@@ -86,6 +88,24 @@ enum xe_guc_ct_state {
 	XE_GUC_CT_STATE_ENABLED,
 };
 
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+/** struct xe_dead_ct - Information for debugging a dead CT */
+struct xe_dead_ct {
+	/** @lock: protects memory allocation/free operations, and @reason updates */
+	spinlock_t lock;
+	/** @reason: bit mask of CT_DEAD_* reason codes */
+	unsigned int reason;
+	/** @reported: for preventing multiple dumps per error sequence */
+	bool reported;
+	/** @worker: worker thread to get out of interrupt context before dumping */
+	struct work_struct worker;
+	/** @snapshot_ct: copy of CT state and CTB content at point of error */
+	struct xe_guc_ct_snapshot *snapshot_ct;
+	/** @snapshot_log: copy of GuC log at point of error */
+	struct xe_guc_log_snapshot *snapshot_log;
+};
+#endif
+
 /**
  * struct xe_guc_ct - GuC command transport (CT) layer
  *
@@ -128,6 +148,11 @@ struct xe_guc_ct {
 	u32 msg[GUC_CTB_MSG_MAX_LEN];
 	/** @fast_msg: Message buffer */
 	u32 fast_msg[GUC_CTB_MSG_MAX_LEN];
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	/** @dead: information for debugging dead CTs */
+	struct xe_dead_ct dead;
+#endif
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index d3822cbea273..995b306aced7 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -47,9 +47,23 @@ static int guc_log(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int guc_ctb(struct seq_file *m, void *data)
+{
+	struct xe_guc *guc = node_to_guc(m->private);
+	struct xe_device *xe = guc_to_xe(guc);
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	xe_pm_runtime_get(xe);
+	xe_guc_ct_print(&guc->ct, &p, true);
+	xe_pm_runtime_put(xe);
+
+	return 0;
+}
+
 static const struct drm_info_list debugfs_list[] = {
 	{"guc_info", guc_info, 0},
 	{"guc_log", guc_log, 0},
+	{"guc_ctb",
guc_ctb, 0}, }; void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 19ee71aeaf17..08ffe59f22fa 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -8,7 +8,9 @@ #include <linux/bits.h> +#include "abi/guc_capture_abi.h" #include "abi/guc_klvs_abi.h" +#include "xe_hw_engine_types.h" #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4 #define G2H_LEN_DW_DEREGISTER_CONTEXT 3 @@ -103,6 +105,7 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) +#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) #define GUC_CTL_DEBUG 3 @@ -157,24 +160,6 @@ struct guc_policies { u32 reserved[4]; } __packed; -/* GuC MMIO reg state struct */ -struct guc_mmio_reg { - u32 offset; - u32 value; - u32 flags; - u32 mask; -#define GUC_REGSET_MASKED BIT(0) -#define GUC_REGSET_MASKED_WITH_VALUE BIT(2) -#define GUC_REGSET_RESTORE_ONLY BIT(3) -} __packed; - -/* GuC register sets */ -struct guc_mmio_reg_set { - u32 address; - u16 count; - u16 reserved; -} __packed; - /* Generic GT SysInfo data types */ #define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0 #define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1 @@ -188,12 +173,6 @@ struct guc_gt_system_info { u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX]; } __packed; -enum { - GUC_CAPTURE_LIST_INDEX_PF = 0, - GUC_CAPTURE_LIST_INDEX_VF = 1, - GUC_CAPTURE_LIST_INDEX_MAX = 2, -}; - /* GuC Additional Data Struct */ struct guc_ads { struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; diff --git a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h index da0fedbbdbaf..da10cf0389cb 100644 --- a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h +++ b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h @@ -18,6 +18,13 @@ MAKE_GUC_KLV_KEY(CONCATENATE(VF_CFG_THRESHOLD_, TAG)) /** + * MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN - Prepare the name of the KLV length constant. + * @TAG: unique tag of the GuC threshold KLV key. + */ +#define MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) \ + MAKE_GUC_KLV_LEN(CONCATENATE(VF_CFG_THRESHOLD_, TAG)) + +/** * xe_guc_klv_threshold_key_to_index - Find index of the tracked GuC threshold. * @key: GuC threshold KLV key. * diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index a37ee3419428..df4cfb698cdb 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -5,13 +5,26 @@ #include "xe_guc_log.h" +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" #include "xe_bo.h" +#include "xe_devcoredump.h" +#include "xe_force_wake.h" #include "xe_gt.h" +#include "xe_gt_printk.h" #include "xe_map.h" +#include "xe_mmio.h" #include "xe_module.h" +static struct xe_guc * +log_to_guc(struct xe_guc_log *log) +{ + return container_of(log, struct xe_guc, log); +} + static struct xe_gt * log_to_gt(struct xe_guc_log *log) { @@ -49,32 +62,194 @@ static size_t guc_log_size(void) CAPTURE_BUFFER_SIZE; } -void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) +#define GUC_LOG_CHUNK_SIZE SZ_2M + +static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *log, bool atomic) +{ + struct xe_guc_log_snapshot *snapshot; + size_t remain; + int i; + + snapshot = kzalloc(sizeof(*snapshot), atomic ? 
GFP_ATOMIC : GFP_KERNEL);
+	if (!snapshot)
+		return NULL;
+
+	/*
+	 * NB: kmalloc has a hard limit well below the maximum GuC log buffer size.
+	 * Also, can't use vmalloc as this might be called from atomic context. So
+	 * need to break the buffer up into smaller chunks that can be allocated.
+	 */
+	snapshot->size = log->bo->size;
+	snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
+
+	snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
+				 atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!snapshot->copy)
+		goto fail_snap;
+
+	remain = snapshot->size;
+	for (i = 0; i < snapshot->num_chunks; i++) {
+		size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+
+		snapshot->copy[i] = kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL);
+		if (!snapshot->copy[i])
+			goto fail_copy;
+		remain -= size;
+	}
+
+	return snapshot;
+
+fail_copy:
+	for (i = 0; i < snapshot->num_chunks; i++)
+		kfree(snapshot->copy[i]);
+	kfree(snapshot->copy);
+fail_snap:
+	kfree(snapshot);
+	return NULL;
+}
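The chunked allocation above works around kmalloc's maximum single-allocation size while remaining atomic-safe (vmalloc is not an option in atomic context). The same pattern in standalone form (a userspace sketch, with malloc standing in for kmalloc):

#include <stdlib.h>
#include <string.h>

#define CHUNK (2u << 20)	/* 2 MiB, mirroring GUC_LOG_CHUNK_SIZE */

static void **copy_in_chunks(const char *src, size_t size, size_t *nchunks)
{
	size_t n = (size + CHUNK - 1) / CHUNK, remain = size, i;
	void **copy = calloc(n, sizeof(*copy));

	if (!copy)
		return NULL;
	for (i = 0; i < n; i++) {
		size_t len = remain < CHUNK ? remain : CHUNK;

		copy[i] = malloc(len);
		if (!copy[i])
			goto fail;
		memcpy(copy[i], src + i * CHUNK, len);
		remain -= len;
	}
	*nchunks = n;
	return copy;

fail:
	while (i--)
		free(copy[i]);
	free(copy);
	return NULL;
}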
+
+/**
+ * xe_guc_log_snapshot_free - free a previously captured GuC log snapshot
+ * @snapshot: GuC log snapshot structure
+ */
+void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot)
+{
+	int i;
+
+	if (!snapshot)
+		return;
+
+	if (snapshot->copy) {
+		for (i = 0; i < snapshot->num_chunks; i++)
+			kfree(snapshot->copy[i]);
+		kfree(snapshot->copy);
+	}
+
+	kfree(snapshot);
+}
+
+/**
+ * xe_guc_log_snapshot_capture - create a new snapshot copy of the GuC log for later dumping
+ * @log: GuC log structure
+ * @atomic: is the call inside an atomic section of some kind?
+ *
+ * Return: pointer to a newly allocated snapshot object or null if out of memory. Caller is
+ * responsible for calling xe_guc_log_snapshot_free when done with the snapshot.
+ */
+struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic)
+{
+	struct xe_guc_log_snapshot *snapshot;
+	struct xe_device *xe = log_to_xe(log);
+	struct xe_guc *guc = log_to_guc(log);
+	struct xe_gt *gt = log_to_gt(log);
+	unsigned int fw_ref;
+	size_t remain;
+	int i;
+
+	if (!log->bo) {
+		xe_gt_err(gt, "GuC log buffer not allocated\n");
+		return NULL;
+	}
+
+	snapshot = xe_guc_log_snapshot_alloc(log, atomic);
+	if (!snapshot) {
+		xe_gt_err(gt, "GuC log snapshot not allocated\n");
+		return NULL;
+	}
+
+	remain = snapshot->size;
+	for (i = 0; i < snapshot->num_chunks; i++) {
+		size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+
+		xe_map_memcpy_from(xe, snapshot->copy[i], &log->bo->vmap,
+				   i * GUC_LOG_CHUNK_SIZE, size);
+		remain -= size;
+	}
+
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!fw_ref) {
+		snapshot->stamp = ~0ULL;
+	} else {
+		snapshot->stamp = xe_mmio_read64_2x32(&gt->mmio, GUC_PMTIMESTAMP_LO);
+		xe_force_wake_put(gt_to_fw(gt), fw_ref);
+	}
+	snapshot->ktime = ktime_get_boottime_ns();
+	snapshot->level = log->level;
+	snapshot->ver_found = guc->fw.versions.found[XE_UC_FW_VER_RELEASE];
+	snapshot->ver_want = guc->fw.versions.wanted;
+	snapshot->path = guc->fw.path;
+
+	return snapshot;
+}
+
+/**
+ * xe_guc_log_snapshot_print - dump a previously saved copy of the GuC log to some useful location
+ * @snapshot: a snapshot of the GuC log
+ * @p: the printer object to output to
+ */
+void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p)
+{
+	size_t remain;
+	int i;
+
+	if (!snapshot) {
+		drm_printf(p, "GuC log snapshot not allocated!\n");
+		return;
+	}
+
+	drm_printf(p, "GuC firmware: %s\n", snapshot->path);
+	drm_printf(p, "GuC version: %u.%u.%u (wanted %u.%u.%u)\n",
+		   snapshot->ver_found.major, snapshot->ver_found.minor, snapshot->ver_found.patch,
+		   snapshot->ver_want.major, snapshot->ver_want.minor, snapshot->ver_want.patch);
+	drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", snapshot->ktime, snapshot->ktime);
+	drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
+	drm_printf(p, "Log level: %u\n", snapshot->level);
+
+	remain = snapshot->size;
+	for (i = 0; i < snapshot->num_chunks; i++) {
+		size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+
+		xe_print_blob_ascii85(p, i ?
NULL : "Log data", snapshot->copy[i], 0, size); + remain -= size; + } +} + +/** + * xe_guc_log_print_dmesg - dump a copy of the GuC log to dmesg + * @log: GuC log structure + */ +void xe_guc_log_print_dmesg(struct xe_guc_log *log) +{ + struct xe_gt *gt = log_to_gt(log); + static int g_count; + struct drm_printer ip = xe_gt_info_printer(gt); + struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count); + + drm_printf(&lp, "Dumping GuC log for %ps...\n", __builtin_return_address(0)); + + xe_guc_log_print(log, &lp); + + drm_printf(&lp, "Done.\n"); +} + +/** + * xe_guc_log_print - dump a copy of the GuC log to some useful location + * @log: GuC log structure + * @p: the printer object to output to + */ +void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) +{ + struct xe_guc_log_snapshot *snapshot; + + drm_printf(p, "**** GuC Log ****\n"); + + snapshot = xe_guc_log_snapshot_capture(log, false); + drm_printf(p, "CS reference clock: %u\n", log_to_gt(log)->info.reference_clock); + xe_guc_log_snapshot_print(snapshot, p); + xe_guc_log_snapshot_free(snapshot); } int xe_guc_log_init(struct xe_guc_log *log) @@ -96,3 +271,105 @@ int xe_guc_log_init(struct xe_guc_log *log) return 0; } + +ALLOW_ERROR_INJECTION(xe_guc_log_init, ERRNO); /* See xe_pci_probe() */ + +static u32 xe_guc_log_section_size_crash(struct xe_guc_log *log) +{ + return CRASH_BUFFER_SIZE; +} + +static u32 xe_guc_log_section_size_debug(struct xe_guc_log *log) +{ + return DEBUG_BUFFER_SIZE; +} + +/** + * xe_guc_log_section_size_capture - Get capture buffer size within log sections. + * @log: The log object. + * + * This function will return the capture buffer size within log sections. + * + * Return: capture buffer size. + */ +u32 xe_guc_log_section_size_capture(struct xe_guc_log *log) +{ + return CAPTURE_BUFFER_SIZE; +} + +/** + * xe_guc_get_log_buffer_size - Get log buffer size for a type. + * @log: The log object. + * @type: The log buffer type + * + * Return: buffer size. + */ +u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type) +{ + switch (type) { + case GUC_LOG_BUFFER_CRASH_DUMP: + return xe_guc_log_section_size_crash(log); + case GUC_LOG_BUFFER_DEBUG: + return xe_guc_log_section_size_debug(log); + case GUC_LOG_BUFFER_CAPTURE: + return xe_guc_log_section_size_capture(log); + } + return 0; +} + +/** + * xe_guc_get_log_buffer_offset - Get offset in log buffer for a type. + * @log: The log object. + * @type: The log buffer type + * + * This function will return the offset in the log buffer for a type. + * Return: buffer offset. + */ +u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type) +{ + enum guc_log_buffer_type i; + u32 offset = PAGE_SIZE;/* for the log_buffer_states */ + + for (i = GUC_LOG_BUFFER_CRASH_DUMP; i < GUC_LOG_BUFFER_TYPE_MAX; ++i) { + if (i == type) + break; + offset += xe_guc_get_log_buffer_size(log, i); + } + + return offset; +} + +/** + * xe_guc_check_log_buf_overflow - Check if log buffer overflowed + * @log: The log object. + * @type: The log buffer type + * @full_cnt: The count of buffer full + * + * This function will check count of buffer full against previous, mismatch + * indicate overflowed. + * Update the sampled_overflow counter, if the 4 bit counter overflowed, add + * up 16 to correct the value. + * + * Return: True if overflowed. 
+ */ +bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, enum guc_log_buffer_type type, + unsigned int full_cnt) +{ + unsigned int prev_full_cnt = log->stats[type].sampled_overflow; + bool overflow = false; + + if (full_cnt != prev_full_cnt) { + overflow = true; + + log->stats[type].overflow = full_cnt; + log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; + + if (full_cnt < prev_full_cnt) { + /* buffer_full_cnt is a 4 bit counter */ + log->stats[type].sampled_overflow += 16; + } + xe_gt_notice(log_to_gt(log), "log buffer overflow\n"); + } + + return overflow; +} diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h index 2d25ab28b4b3..5b896f5fafaf 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.h +++ b/drivers/gpu/drm/xe/xe_guc_log.h @@ -7,8 +7,10 @@ #define _XE_GUC_LOG_H_ #include "xe_guc_log_types.h" +#include "abi/guc_log_abi.h" struct drm_printer; +struct xe_device; #if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER) #define CRASH_BUFFER_SIZE SZ_1M @@ -17,7 +19,7 @@ struct drm_printer; #else #define CRASH_BUFFER_SIZE SZ_8K #define DEBUG_BUFFER_SIZE SZ_64K -#define CAPTURE_BUFFER_SIZE SZ_16K +#define CAPTURE_BUFFER_SIZE SZ_1M #endif /* * While we're using plain log level in i915, GuC controls are much more... @@ -38,6 +40,10 @@ struct drm_printer; int xe_guc_log_init(struct xe_guc_log *log); void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p); +void xe_guc_log_print_dmesg(struct xe_guc_log *log); +struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic); +void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p); +void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot); static inline u32 xe_guc_log_get_level(struct xe_guc_log *log) @@ -45,4 +51,11 @@ xe_guc_log_get_level(struct xe_guc_log *log) return log->level; } +u32 xe_guc_log_section_size_capture(struct xe_guc_log *log); +u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type); +u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type); +bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, + enum guc_log_buffer_type type, + unsigned int full_cnt); + #endif diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h index 125080d138a7..b3d5c72ac752 100644 --- a/drivers/gpu/drm/xe/xe_guc_log_types.h +++ b/drivers/gpu/drm/xe/xe_guc_log_types.h @@ -7,10 +7,38 @@ #define _XE_GUC_LOG_TYPES_H_ #include <linux/types.h> +#include "abi/guc_log_abi.h" + +#include "xe_uc_fw_types.h" struct xe_bo; /** + * struct xe_guc_log_snapshot: + * Capture of the GuC log plus various state useful for decoding the log + */ +struct xe_guc_log_snapshot { + /** @size: Size in bytes of the @copy allocation */ + size_t size; + /** @copy: Host memory copy of the log buffer for later dumping, split into chunks */ + void **copy; + /** @num_chunks: Number of chunks within @copy */ + int num_chunks; + /** @ktime: Kernel time the snapshot was taken */ + u64 ktime; + /** @stamp: GuC timestamp at which the snapshot was taken */ + u64 stamp; + /** @level: GuC log verbosity level */ + u32 level; + /** @ver_found: GuC firmware version */ + struct xe_uc_fw_version ver_found; + /** @ver_want: GuC firmware version that driver expected */ + struct xe_uc_fw_version ver_want; + /** @path: Path of GuC firmware blob */ + const char *path; +}; + +/** * struct xe_guc_log - GuC log */ struct xe_guc_log { @@ -18,6 +46,12 @@ struct xe_guc_log { u32 level; /** 
@bo: XE BO for GuC log */ struct xe_bo *bo; + /** @stats: logging related stats */ + struct { + u32 sampled_overflow; + u32 overflow; + u32 flush; + } stats[GUC_LOG_BUFFER_TYPE_MAX]; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 034b29984d5e..e8b9faeaef64 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -262,7 +262,7 @@ static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable) u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE; /* Allow/Disallow punit to process software freq requests */ - xe_mmio_write32(gt, RP_CONTROL, state); + xe_mmio_write32(&gt->mmio, RP_CONTROL, state); } static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) @@ -274,7 +274,7 @@ static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) /* Req freq is in units of 16.66 Mhz */ rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq)); - xe_mmio_write32(gt, RPNSWREQ, rpnswreq); + xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq); /* Sleep for a small time to allow pcode to respond */ usleep_range(100, 300); @@ -334,9 +334,9 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc) u32 reg; if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY); + reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY); else - reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY); + reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY); pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); } @@ -353,9 +353,9 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc) * PCODE at a different register */ if (xe->info.platform == XE_PVC) - reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP); else - reg = xe_mmio_read32(gt, FREQ_INFO_REC); + reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC); pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } @@ -392,10 +392,10 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) /* When in RC6, actual frequency reported will be 0. */ if (GRAPHICS_VERx100(xe) >= 1270) { - freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); + freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1); freq = REG_FIELD_GET(MTL_CAGF_MASK, freq); } else { - freq = xe_mmio_read32(gt, GT_PERF_STATUS); + freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS); freq = REG_FIELD_GET(CAGF_MASK, freq); } @@ -415,22 +415,24 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); - int ret; + unsigned int fw_ref; /* * GuC SLPC plays with cur freq request when GuCRC is enabled * Block RC6 for a more reliable read.
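
The forcewake conversions in this and the following hunks all have the same shape: xe_force_wake_get() now returns a reference mask rather than an error code, the caller checks it with xe_force_wake_ref_has_domain(), and the reference is always handed back with xe_force_wake_put(), including on the failure path, since a partial grab may still hold some domains. A self-contained sketch of the converted pattern, assembled from the calls visible in these hunks:

    /* Sketch only: the fw_ref pattern these hunks convert to. */
    static int example_with_forcewake(struct xe_gt *gt)
    {
    	unsigned int fw_ref;

    	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
    	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
    		/* A partial grab may still hold domains: drop them before bailing. */
    		xe_force_wake_put(gt_to_fw(gt), fw_ref);
    		return -ETIMEDOUT;
    	}

    	/* ... MMIO access that needs the GT awake goes here ... */

    	xe_force_wake_put(gt_to_fw(gt), fw_ref);
    	return 0;
    }
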
*/ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } - *freq = xe_mmio_read32(gt, RPNSWREQ); + *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ); *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq); *freq = decode_freq(*freq); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -480,6 +482,7 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) { struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret; mutex_lock(&pc->freq_lock); @@ -493,9 +496,11 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) * GuC SLPC plays with min freq request when GuCRC is enabled * Block RC6 for a more reliable read. */ - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - goto out; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto fw; + } ret = pc_action_query_task_state(pc); if (ret) @@ -504,7 +509,7 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) *freq = pc_get_min_freq(pc); fw: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); out: mutex_unlock(&pc->freq_lock); return ret; @@ -612,10 +617,10 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) u32 reg, gt_c_state; if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { - reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); + reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1); gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg); } else { - reg = xe_mmio_read32(gt, GT_CORE_STATUS); + reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS); gt_c_state = REG_FIELD_GET(RCN_MASK, reg); } @@ -638,7 +643,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 reg; - reg = xe_mmio_read32(gt, GT_GFX_RC6); + reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6); return reg; } @@ -652,7 +657,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u64 reg; - reg = xe_mmio_read32(gt, MTL_MEDIA_MC6); + reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6); return reg; } @@ -665,9 +670,9 @@ static void mtl_init_fused_rp_values(struct xe_guc_pc *pc) xe_device_assert_mem_access(pc_to_xe(pc)); if (xe_gt_is_media_type(gt)) - reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP); else - reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP); pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg)); @@ -683,9 +688,9 @@ static void tgl_init_fused_rp_values(struct xe_guc_pc *pc) xe_device_assert_mem_access(pc_to_xe(pc)); if (xe->info.platform == XE_PVC) - reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP); else - reg = xe_mmio_read32(gt, RP_STATE_CAP); + reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP); pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER; } @@ -855,6 +860,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) { struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); + unsigned int fw_ref; int ret = 0; if (xe->info.skip_guc_pc) @@ -864,13 +870,15 @@ int
xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) if (ret) return ret; - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } xe_gt_idle_disable_c6(gt); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; } @@ -956,13 +964,16 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) struct xe_device *xe = pc_to_xe(pc); struct xe_gt *gt = pc_to_gt(pc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + unsigned int fw_ref; int ret; xe_gt_assert(gt, xe_device_uc_enabled(xe)); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (ret) - return ret; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return -ETIMEDOUT; + } if (xe->info.skip_guc_pc) { if (xe->info.platform != XE_PVC) @@ -1005,7 +1016,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL); out: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return ret; } @@ -1037,18 +1048,19 @@ static void xe_guc_pc_fini_hw(void *arg) { struct xe_guc_pc *pc = arg; struct xe_device *xe = pc_to_xe(pc); + unsigned int fw_ref; if (xe_device_wedged(xe)) return; - XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); + fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); xe_guc_pc_gucrc_disable(pc); XE_WARN_ON(xe_guc_pc_stop(pc)); /* Bind requested freq to mert_freq_cap before unload */ pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq)); - xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref); } /** diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c index ade6162dc259..8f62de026724 100644 --- a/drivers/gpu/drm/xe/xe_guc_relay.c +++ b/drivers/gpu/drm/xe/xe_guc_relay.c @@ -5,6 +5,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> +#include <linux/fault-inject.h> #include <drm/drm_managed.h> @@ -355,6 +356,7 @@ int xe_guc_relay_init(struct xe_guc_relay *relay) return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay); } +ALLOW_ERROR_INJECTION(xe_guc_relay_init, ERRNO); /* See xe_pci_probe() */ static u32 to_relay_error(int err) { diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 4f5d00aea716..9a8564ea06b5 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -27,6 +27,7 @@ #include "xe_gt_clock.h" #include "xe_gt_printk.h" #include "xe_guc.h" +#include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_guc_exec_queue_types.h" #include "xe_guc_id_mgr.h" @@ -716,6 +717,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) struct xe_exec_queue *q = job->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); + struct dma_fence *fence = NULL; bool lr = xe_exec_queue_is_lr(q); xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || @@ -733,12 +735,12 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) if (lr) { xe_sched_job_set_error(job, -EOPNOTSUPP); - return NULL; - } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) { - return job->fence; + 
dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */ } else { - return dma_fence_get(job->fence); + fence = job->fence; } + + return fence; } static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) @@ -749,7 +751,7 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) xe_sched_job_put(job); } -static int guc_read_stopped(struct xe_guc *guc) +int xe_guc_read_stopped(struct xe_guc *guc) { return atomic_read(&guc->submission_state.stopped); } @@ -771,7 +773,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc, set_min_preemption_timeout(guc, q); smp_rmb(); ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { struct xe_gpu_scheduler *sched = &q->guc->sched; @@ -897,7 +899,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) */ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - guc_read_stopped(guc), HZ * 5); + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { drm_warn(&xe->drm, "Schedule disable failed to respond"); xe_sched_submission_start(sched); @@ -975,8 +977,8 @@ static void enable_scheduling(struct xe_exec_queue *q) ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) { + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) { xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond"); set_exec_queue_banned(q); xe_gt_reset_async(q->gt); @@ -1031,6 +1033,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_guc *guc = exec_queue_to_guc(q); const char *process_name = "no process"; + struct xe_device *xe = guc_to_xe(guc); + unsigned int fw_ref; int err = -ETIME; pid_t pid = -1; int i = 0; @@ -1058,6 +1062,22 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) exec_queue_destroyed(q); /* + * If the devcoredump has not been captured and the GuC capture for this + * job is not ready, do a manual capture first and decide later if we + * need to use it + */ + if (!exec_queue_killed(q) && !xe->devcoredump.captured && + !xe_guc_capture_get_matching_and_lock(job)) { + /* take force wake before engine register manual capture */ + fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); + + xe_engine_snapshot_capture_for_job(job); + + xe_force_wake_put(gt_to_fw(q->gt), fw_ref); + } + + /* * XXX: Sampling timeout doesn't work in wedged mode as we have to * modify scheduling state to read timestamp.
We could read the * timestamp from a register to accumulate current running time but this @@ -1080,8 +1100,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) */ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) goto trigger_reset; /* @@ -1105,8 +1125,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) smp_rmb(); ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_disable(q) || - guc_read_stopped(guc), HZ * 5); - if (!ret || guc_read_stopped(guc)) { + xe_guc_read_stopped(guc), HZ * 5); + if (!ret || xe_guc_read_stopped(guc)) { trigger_reset: if (!ret) xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond"); @@ -1295,7 +1315,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q) struct xe_device *xe = guc_to_xe(guc); xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) || - guc_read_stopped(guc)); + xe_guc_read_stopped(guc)); xe_assert(xe, q->guc->suspend_pending); __suspend_fence_signal(q); @@ -1309,9 +1329,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && exec_queue_enabled(q)) { wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || - guc_read_stopped(guc)); + xe_guc_read_stopped(guc)); - if (!guc_read_stopped(guc)) { + if (!xe_guc_read_stopped(guc)) { s64 since_resume_ms = ktime_ms_delta(ktime_get(), q->guc->resume_time); @@ -1435,7 +1455,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) q->entity = &ge->entity; - if (guc_read_stopped(guc)) + if (xe_guc_read_stopped(guc)) xe_sched_stop(sched); mutex_unlock(&guc->submission_state.lock); @@ -1591,7 +1611,7 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) ret = wait_event_interruptible_timeout(q->guc->suspend_wait, !READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || - guc_read_stopped(guc), + xe_guc_read_stopped(guc), HZ * 5); if (!ret) { @@ -1717,7 +1737,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc) void xe_guc_submit_reset_wait(struct xe_guc *guc) { wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) || - !guc_read_stopped(guc)); + !xe_guc_read_stopped(guc)); } void xe_guc_submit_stop(struct xe_guc *guc) @@ -1726,7 +1746,7 @@ void xe_guc_submit_stop(struct xe_guc *guc) unsigned long index; struct xe_device *xe = guc_to_xe(guc); - xe_assert(xe, guc_read_stopped(guc) == 1); + xe_assert(xe, xe_guc_read_stopped(guc) == 1); mutex_lock(&guc->submission_state.lock); @@ -1770,7 +1790,7 @@ int xe_guc_submit_start(struct xe_guc *guc) unsigned long index; struct xe_device *xe = guc_to_xe(guc); - xe_assert(xe, guc_read_stopped(guc) == 1); + xe_assert(xe, xe_guc_read_stopped(guc) == 1); mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); @@ -1949,8 +1969,6 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d", xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); - /* FIXME: Do error capture, most likely async */ - trace_xe_exec_queue_reset(q); /* @@ -1966,6 +1984,36 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len) return 0; } +/* + * xe_guc_error_capture_handler - Handler of GuC captured message + * @guc: The GuC object + * @msg: Pointer to the message + * @len: The message length + * + * When
GuC captured data is ready, GuC will send the + * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host; this function + * is called first to check the status before processing the data that comes + * with the message. + * + * Return: 0 on success, negative error code on failure. + */ +int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len) +{ + u32 status; + + if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN)) { + xe_gt_dbg(guc_to_gt(guc), "Invalid length %u", len); + return -EPROTO; + } + + status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK; + if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE) + xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space"); + + xe_guc_capture_process(guc); + + return 0; +} + int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, u32 len) { @@ -2180,7 +2228,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps if (!snapshot) return; - drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id); + drm_printf(p, "GuC ID: %d\n", snapshot->guc.id); drm_printf(p, "\tName: %s\n", snapshot->name); drm_printf(p, "\tClass: %d\n", snapshot->class); drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index bdf8c9f3d24a..9b71a986c6ca 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -20,12 +20,14 @@ void xe_guc_submit_stop(struct xe_guc *guc); int xe_guc_submit_start(struct xe_guc *guc); void xe_guc_submit_wedge(struct xe_guc *guc); +int xe_guc_read_stopped(struct xe_guc *guc); int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, u32 len); int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len); +int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len); struct xe_guc_submit_exec_queue_snapshot * xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q); diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index ed150fc09ad0..fa75f57bf5da 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -58,6 +58,8 @@ struct xe_guc { struct xe_guc_ads ads; /** @ct: GuC ct */ struct xe_guc_ct ct; + /** @capture: the error-state-capture module's data and objects */ + struct xe_guc_state_capture *capture; /** @pc: GuC Power Conservation */ struct xe_guc_pc pc; /** @dbm: GuC Doorbell Manager */ diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index f5459f97af23..6a846e4cb221 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -229,7 +229,7 @@ bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type) { struct xe_gt *gt = huc_to_gt(huc); - return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val; + return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val; } int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type) @@ -268,7 +268,7 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type) goto fail; } - ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val, + ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val, huc_auth_modes[type].val, 100000, NULL, false); if (ret)
{ xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret)); @@ -296,19 +296,19 @@ void xe_huc_sanitize(struct xe_huc *huc) void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p) { struct xe_gt *gt = huc_to_gt(huc); - int err; + unsigned int fw_ref; xe_uc_fw_print(&huc->fw, p); if (!xe_uc_fw_is_enabled(&huc->fw)) return; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) return; drm_printf(p, "\nHuC status: 0x%08x\n", - xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO)); + xe_mmio_read32(>->mmio, HUC_KERNEL_LOAD_INFO)); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index c9c3beb3ce8d..1557acee3523 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -12,6 +12,7 @@ #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" +#include "regs/xe_irq_regs.h" #include "xe_assert.h" #include "xe_bo.h" #include "xe_device.h" @@ -23,6 +24,7 @@ #include "xe_gt_printk.h" #include "xe_gt_mcr.h" #include "xe_gt_topology.h" +#include "xe_guc_capture.h" #include "xe_hw_engine_group.h" #include "xe_hw_fence.h" #include "xe_irq.h" @@ -295,7 +297,7 @@ void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, reg.addr += hwe->mmio_base; - xe_mmio_write32(hwe->gt, reg, val); + xe_mmio_write32(&hwe->gt->mmio, reg, val); } /** @@ -315,7 +317,7 @@ u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) reg.addr += hwe->mmio_base; - return xe_mmio_read32(hwe->gt, reg); + return xe_mmio_read32(&hwe->gt->mmio, reg); } void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) @@ -324,7 +326,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE); if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask) - xe_mmio_write32(hwe->gt, RCU_MODE, + xe_mmio_write32(&hwe->gt->mmio, RCU_MODE, _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE)); xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0); @@ -354,7 +356,7 @@ static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt, hwe->class != XE_ENGINE_CLASS_RENDER) return false; - return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE; + return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE; } void @@ -460,6 +462,30 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe) xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr); } +static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance) +{ + const struct engine_info *info; + enum xe_hw_engine_id id; + + for (id = 0; id < XE_NUM_HW_ENGINES; ++id) { + info = &engine_infos[id]; + if (info->class == class && info->instance == instance) + return info; + } + + return NULL; +} + +static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class) +{ + /* For MSI-X, hw engines report to offset of engine instance zero */ + const struct engine_info *info = find_engine_info(class, 0); + + xe_gt_assert(gt, info); + + return info ? info->irq_offset : 0; +} + static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, enum xe_hw_engine_id id) { @@ -479,7 +505,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, hwe->class = info->class; hwe->instance = info->instance; hwe->mmio_base = info->mmio_base; - hwe->irq_offset = info->irq_offset; + hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ? 
+ get_msix_irq_offset(gt, info->class) : + info->irq_offset; hwe->domain = info->domain; hwe->name = info->name; hwe->fence_irq = &gt->fence_irq[info->class]; @@ -612,7 +640,7 @@ static void read_media_fuses(struct xe_gt *gt) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE); + media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE); /* * Pre-Xe_HP platforms had register bits representing absent engines, @@ -657,7 +685,7 @@ static void read_copy_fuses(struct xe_gt *gt) xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3); + bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3); bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask); /* BCS0 is always present; only BCS1-BCS8 may be fused off */ @@ -704,7 +732,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); u32 ccs_mask; - ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4); + ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4); ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask); for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) { @@ -742,8 +770,8 @@ static void check_gsc_availability(struct xe_gt *gt) gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0); /* interrupts were previously enabled, so turn them off */ - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0); - xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0); + xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0); + xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0); drm_info(&xe->drm, "gsccs disabled due to lack of FW\n"); } @@ -798,60 +826,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec) xe_hw_fence_irq_run(hwe->fence_irq); } -static bool -is_slice_common_per_gslice(struct xe_device *xe) -{ - return GRAPHICS_VERx100(xe) >= 1255; -} - -static void -xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe, - struct xe_hw_engine_snapshot *snapshot) -{ - struct xe_gt *gt = hwe->gt; - struct xe_device *xe = gt_to_xe(gt); - unsigned int dss; - u16 group, instance; - - snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0)); - - if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER) - return; - - if (is_slice_common_per_gslice(xe) == false) { - snapshot->reg.instdone.slice_common[0] = - xe_mmio_read32(gt, SC_INSTDONE); - snapshot->reg.instdone.slice_common_extra[0] = - xe_mmio_read32(gt, SC_INSTDONE_EXTRA); - snapshot->reg.instdone.slice_common_extra2[0] = - xe_mmio_read32(gt, SC_INSTDONE_EXTRA2); - } else { - for_each_geometry_dss(dss, gt, group, instance) { - snapshot->reg.instdone.slice_common[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance); - snapshot->reg.instdone.slice_common_extra[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance); - snapshot->reg.instdone.slice_common_extra2[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance); - } - } - - for_each_geometry_dss(dss, gt, group, instance) { - snapshot->reg.instdone.sampler[dss] = - xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance); - snapshot->reg.instdone.row[dss] = - xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance); - - if (GRAPHICS_VERx100(xe) >= 1255) - snapshot->reg.instdone.geom_svg[dss] = - xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT, - group, instance); - } -} - /** * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine. * @hwe: Xe HW Engine. + * @job: The job object.
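
The MSI-X override of hwe->irq_offset above is easiest to see with concrete numbers; the offsets in this illustration are invented for the example:

    /* Illustration only (hypothetical offset values):
     *
     * Legacy interrupts - each instance reports at its own offset:
     *   VCS0->irq_offset == 32, VCS1->irq_offset == 33
     *
     * MSI-X - every instance of a class reports at instance 0's offset,
     * i.e. get_msix_irq_offset(gt, XE_ENGINE_CLASS_VIDEO_DECODE) for all VCS:
     *   VCS0->irq_offset == 32, VCS1->irq_offset == 32
     */
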
* * This can be printed out in a later stage like during dev_coredump * analysis. @@ -860,11 +838,10 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe, * caller, using `xe_hw_engine_snapshot_free`. */ struct xe_hw_engine_snapshot * -xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) +xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job) { struct xe_hw_engine_snapshot *snapshot; - size_t len; - u64 val; + struct __guc_capture_parsed_output *node; if (!xe_hw_engine_is_valid(hwe)) return NULL; @@ -874,28 +851,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) if (!snapshot) return NULL; - /* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h and it - * includes xe_hw_engine_types.h the length of this 3 registers can't be - * set in struct xe_hw_engine_snapshot, so here doing additional - * allocations. - */ - len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32)); - snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC); - snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC); - if (!snapshot->reg.instdone.slice_common || - !snapshot->reg.instdone.slice_common_extra || - !snapshot->reg.instdone.slice_common_extra2 || - !snapshot->reg.instdone.sampler || - !snapshot->reg.instdone.row || - !snapshot->reg.instdone.geom_svg) { - xe_hw_engine_snapshot_free(snapshot); - return NULL; - } - snapshot->name = kstrdup(hwe->name, GFP_ATOMIC); snapshot->hwe = hwe; snapshot->logical_instance = hwe->logical_instance; @@ -903,157 +858,32 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe) snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt), hwe->domain); snapshot->mmio_base = hwe->mmio_base; + snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe); /* no more VF accessible data below this point */ if (IS_SRIOV_VF(gt_to_xe(hwe->gt))) return snapshot; - snapshot->reg.ring_execlist_status = - xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)); - snapshot->reg.ring_execlist_status |= val << 32; - - snapshot->reg.ring_execlist_sq_contents = - xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0)); - snapshot->reg.ring_execlist_sq_contents |= val << 32; - - snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)); - snapshot->reg.ring_acthd |= val << 32; - - snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)); - snapshot->reg.ring_bbaddr |= val << 32; - - snapshot->reg.ring_dma_fadd = - xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)); - val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)); - snapshot->reg.ring_dma_fadd |= val << 32; - - snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0)); - snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)); - snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0)); - if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) { - val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0)); - snapshot->reg.ring_start |= val << 32; - } - if (xe_gt_has_indirect_ring_state(hwe->gt)) { - 
snapshot->reg.indirect_ring_state = - xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0)); - } - - snapshot->reg.ring_head = - xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR; - snapshot->reg.ring_tail = - xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR; - snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0)); - snapshot->reg.ring_mi_mode = - xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); - snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0)); - snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0)); - snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0)); - snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0)); - snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0)); - snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0)); - xe_hw_engine_snapshot_instdone_capture(hwe, snapshot); - - if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE) - snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE); - - return snapshot; -} - -static void -xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p) -{ - struct xe_gt *gt = snapshot->hwe->gt; - struct xe_device *xe = gt_to_xe(gt); - u16 group, instance; - unsigned int dss; - - drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring); - - if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER) - return; - - if (is_slice_common_per_gslice(xe) == false) { - drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common[0]); - drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common_extra[0]); - drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n", - snapshot->reg.instdone.slice_common_extra2[0]); - } else { - for_each_geometry_dss(dss, gt, group, instance) { - drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common[dss]); - drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common_extra[dss]); - drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.slice_common_extra2[dss]); + if (job) { + /* If a GuC error capture exists for this job, use it as the source */ + node = xe_guc_capture_get_matching_and_lock(job); + if (node) { + struct xe_device *xe = gt_to_xe(hwe->gt); + struct xe_devcoredump *coredump = &xe->devcoredump; + + coredump->snapshot.matched_node = node; + snapshot->source = XE_ENGINE_CAPTURE_SOURCE_GUC; + xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node"); + return snapshot; } } - for_each_geometry_dss(dss, gt, group, instance) { - drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.sampler[dss]); - drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss, - snapshot->reg.instdone.row[dss]); - - if (GRAPHICS_VERx100(xe) >= 1255) - drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n", - dss, snapshot->reg.instdone.geom_svg[dss]); - } -} + /* otherwise, do manual capture */ + xe_engine_manual_capture(hwe, snapshot); + snapshot->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL; + xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot"); -/** - * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot. - * @snapshot: Xe HW Engine snapshot object. - * @p: drm_printer where it will be printed out. - * - * This function prints out a given Xe HW Engine snapshot object.
- */ -void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, - struct drm_printer *p) -{ - if (!snapshot) - return; - - drm_printf(p, "%s (physical), logical instance=%d\n", - snapshot->name ? snapshot->name : "", - snapshot->logical_instance); - drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n", - snapshot->forcewake.domain, snapshot->forcewake.ref); - drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam); - drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga); - drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n", - snapshot->reg.ring_execlist_status); - drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n", - snapshot->reg.ring_execlist_sq_contents); - drm_printf(p, "\tRING_START: 0x%016llx\n", snapshot->reg.ring_start); - drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head); - drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail); - drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl); - drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode); - drm_printf(p, "\tRING_MODE: 0x%08x\n", - snapshot->reg.ring_mode); - drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr); - drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr); - drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr); - drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir); - drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd); - drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr); - drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd); - drm_printf(p, "\tINDIRECT_RING_STATE: 0x%08x\n", - snapshot->reg.indirect_ring_state); - drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr); - xe_hw_engine_snapshot_instdone_print(snapshot, p); - - if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE) - drm_printf(p, "\tRCU_MODE: 0x%08x\n", - snapshot->reg.rcu_mode); - drm_puts(p, "\n"); + return snapshot; } /** @@ -1065,15 +895,18 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, */ void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot) { + struct xe_gt *gt; if (!snapshot) return; - kfree(snapshot->reg.instdone.slice_common); - kfree(snapshot->reg.instdone.slice_common_extra); - kfree(snapshot->reg.instdone.slice_common_extra2); - kfree(snapshot->reg.instdone.sampler); - kfree(snapshot->reg.instdone.row); - kfree(snapshot->reg.instdone.geom_svg); + gt = snapshot->hwe->gt; + /* + * xe_guc_capture_put_matched_nodes is called here and from + * xe_devcoredump_snapshot_free, to cover the 2 calling paths + * of hw_engines - debugfs and devcoredump free. 
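
Since the snapshot now records where its data came from, consumers can branch on the new snapshot->source field instead of re-reading hardware. A hedged sketch of the consuming side (xe_engine_snapshot_print() is the real consumer in this series; example_print_source() is illustrative only):

    /* Illustrative only: branching on the capture source of a snapshot. */
    static void example_print_source(struct xe_hw_engine_snapshot *snapshot,
    				 struct drm_printer *p)
    {
    	if (snapshot->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
    		drm_printf(p, "Capture source: GuC error capture\n");
    	else /* XE_ENGINE_CAPTURE_SOURCE_MANUAL */
    		drm_printf(p, "Capture source: manual MMIO reads\n");
    }
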
+ */ + xe_guc_capture_put_matched_nodes(&gt->uc.guc); + kfree(snapshot->name); kfree(snapshot); } @@ -1089,8 +922,8 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p) { struct xe_hw_engine_snapshot *snapshot; - snapshot = xe_hw_engine_snapshot_capture(hwe); - xe_hw_engine_snapshot_print(snapshot, p); + snapshot = xe_hw_engine_snapshot_capture(hwe, NULL); + xe_engine_snapshot_print(snapshot, p); xe_hw_engine_snapshot_free(snapshot); } @@ -1150,7 +983,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class) u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe) { - return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base)); + return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base)); } enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h index 022819a4a8eb..da0a6922a26f 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.h +++ b/drivers/gpu/drm/xe/xe_hw_engine.h @@ -11,6 +11,7 @@ struct drm_printer; struct drm_xe_engine_class_instance; struct xe_device; +struct xe_sched_job; #ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN #define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN @@ -54,12 +55,9 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec); void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe); u32 xe_hw_engine_mask_per_class(struct xe_gt *gt, enum xe_engine_class engine_class); - struct xe_hw_engine_snapshot * -xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe); +xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_sched_job *job); void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot); -void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, - struct drm_printer *p); void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p); void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe); diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h index 8be6d420ece4..719f27ef00a5 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h @@ -152,6 +152,11 @@ struct xe_hw_engine { struct xe_hw_engine_group *hw_engine_group; }; +enum xe_hw_engine_snapshot_source_id { + XE_ENGINE_CAPTURE_SOURCE_MANUAL, + XE_ENGINE_CAPTURE_SOURCE_GUC +}; + /** * struct xe_hw_engine_snapshot - Hardware engine snapshot * @@ -160,6 +165,8 @@ struct xe_hw_engine { struct xe_hw_engine_snapshot { /** @name: name of the hw engine */ char *name; + /** @source: Data source, either manual or GuC */ + enum xe_hw_engine_snapshot_source_id source; /** @hwe: hw engine */ struct xe_hw_engine *hwe; /** @logical_instance: logical instance of this hw engine */ @@ -173,65 +180,8 @@ struct xe_hw_engine_snapshot { } forcewake; /** @mmio_base: MMIO base address of this hw engine*/ u32 mmio_base; - /** @reg: Useful MMIO register snapshot */ - struct { - /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */ - u64 ring_execlist_status; - /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */ - u64 ring_execlist_sq_contents; - /** @reg.ring_acthd: RING_ACTHD */ - u64 ring_acthd; - /** @reg.ring_bbaddr: RING_BBADDR */ - u64 ring_bbaddr; - /** @reg.ring_dma_fadd: RING_DMA_FADD */ - u64 ring_dma_fadd; - /** @reg.ring_hwstam: RING_HWSTAM */ - u32 ring_hwstam; - /** @reg.ring_hws_pga: RING_HWS_PGA */ - u32 ring_hws_pga; - /** @reg.ring_start: RING_START */ - u64 ring_start; - /** @reg.ring_head:
RING_HEAD */ - u32 ring_head; - /** @reg.ring_tail: RING_TAIL */ - u32 ring_tail; - /** @reg.ring_ctl: RING_CTL */ - u32 ring_ctl; - /** @reg.ring_mi_mode: RING_MI_MODE */ - u32 ring_mi_mode; - /** @reg.ring_mode: RING_MODE */ - u32 ring_mode; - /** @reg.ring_imr: RING_IMR */ - u32 ring_imr; - /** @reg.ring_esr: RING_ESR */ - u32 ring_esr; - /** @reg.ring_emr: RING_EMR */ - u32 ring_emr; - /** @reg.ring_eir: RING_EIR */ - u32 ring_eir; - /** @reg.indirect_ring_state: INDIRECT_RING_STATE */ - u32 indirect_ring_state; - /** @reg.ipehr: IPEHR */ - u32 ipehr; - /** @reg.rcu_mode: RCU_MODE */ - u32 rcu_mode; - struct { - /** @reg.instdone.ring: RING_INSTDONE */ - u32 ring; - /** @reg.instdone.slice_common: SC_INSTDONE */ - u32 *slice_common; - /** @reg.instdone.slice_common_extra: SC_INSTDONE_EXTRA */ - u32 *slice_common_extra; - /** @reg.instdone.slice_common_extra2: SC_INSTDONE_EXTRA2 */ - u32 *slice_common_extra2; - /** @reg.instdone.sampler: SAMPLER_INSTDONE */ - u32 *sampler; - /** @reg.instdone.row: ROW_INSTDONE */ - u32 *row; - /** @reg.instdone.geom_svg: INSTDONE_GEOM_SVGUNIT */ - u32 *geom_svg; - } instdone; - } reg; + /** @kernel_reserved: Engine reserved, can't be used by userspace */ + bool kernel_reserved; }; #endif diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index aa11728e7e79..fde56dad3ab7 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -149,7 +149,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v u64 reg_val, min, max; struct xe_device *xe = hwmon->xe; struct xe_reg rapl_limit, pkg_power_sku; - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); @@ -190,7 +190,7 @@ unlock: static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); int ret = 0; u64 reg_val; struct xe_reg rapl_limit; @@ -222,7 +222,7 @@ unlock: static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); u64 reg_val; @@ -259,7 +259,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l static void xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); struct xe_hwmon_energy_info *ei = &hwmon->ei[channel]; u64 reg_val; @@ -282,7 +282,7 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at char *buf) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u32 x, y, x_w = 2; /* 2 bits */ u64 r, tau4, out; int sensor_index = to_sensor_dev_attr(attr)->index; @@ -323,7 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a const char *buf, size_t count) { struct xe_hwmon *hwmon = dev_get_drvdata(dev); - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u32 x, y, rxy, x_w = 2; /* 2 bits */ u64 tau4, r, max_win; unsigned long val; @@ -498,7 
+498,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value) { - struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe); + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); u64 reg_val; reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel)); @@ -781,7 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = { static void xe_hwmon_get_preregistration_info(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct xe_hwmon *hwmon = xe->hwmon; long energy; u64 val_sku_unit = 0; diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 5f2c368c35ad..b7995ebd54ab 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -10,8 +10,7 @@ #include <drm/drm_managed.h> #include "display/xe_display.h" -#include "regs/xe_gt_regs.h" -#include "regs/xe_regs.h" +#include "regs/xe_irq_regs.h" #include "xe_device.h" #include "xe_drv.h" #include "xe_gsc_proxy.h" @@ -30,14 +29,14 @@ #define IIR(offset) XE_REG(offset + 0x8) #define IER(offset) XE_REG(offset + 0xc) -static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg) +static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg) { u32 val = xe_mmio_read32(mmio, reg); if (val == 0) return; - drm_WARN(&gt_to_xe(mmio)->drm, 1, + drm_WARN(&mmio->tile->xe->drm, 1, "Interrupt register 0x%x is not zero: 0x%08x\n", reg.addr, val); xe_mmio_write32(mmio, reg, 0xffffffff); @@ -52,7 +51,7 @@ static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg) */ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; /* * If we're just enabling an interrupt now, it shouldn't already @@ -70,7 +69,7 @@ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits) /* Mask and disable all interrupts.
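
For orientation: IIR and IER above form the usual masked-interrupt register triplet with IMR at fixed offsets from a bank base (the IMR define sits just above this hunk, presumably at +0x4). mask_and_disable(), which follows, operates on a whole bank at once; a hedged sketch of that sequence under those offset assumptions:

    /* Sketch of the disable sequence on one interrupt bank. */
    static void example_mask_and_disable(struct xe_mmio *mmio, u32 irqregs)
    {
    	xe_mmio_write32(mmio, IMR(irqregs), ~0);
    	xe_mmio_read32(mmio, IMR(irqregs));      /* posting read */
    	xe_mmio_write32(mmio, IER(irqregs), 0);
    	xe_mmio_write32(mmio, IIR(irqregs), ~0); /* write-1-to-clear stale bits */
    	xe_mmio_read32(mmio, IIR(irqregs));
    }
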
*/ static void mask_and_disable(struct xe_tile *tile, u32 irqregs) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; xe_mmio_write32(mmio, IMR(irqregs), ~0); /* Posting read */ @@ -87,7 +86,7 @@ static void mask_and_disable(struct xe_tile *tile, u32 irqregs) static u32 xelp_intr_disable(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0); @@ -103,7 +102,7 @@ static u32 xelp_intr_disable(struct xe_device *xe) static u32 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u32 iir; if (!(master_ctl & GU_MISC_IRQ)) @@ -118,7 +117,7 @@ gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) static inline void xelp_intr_enable(struct xe_device *xe, bool stall) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ); if (stall) @@ -129,12 +128,13 @@ static inline void xelp_intr_enable(struct xe_device *xe, bool stall) void xe_irq_enable_hwe(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + struct xe_mmio *mmio = &gt->mmio; u32 ccs_mask, bcs_mask; u32 irqs, dmask, smask; u32 gsc_mask = 0; u32 heci_mask = 0; - if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) + if (xe_device_uses_memirq(xe)) return; if (xe_device_uc_enabled(xe)) { @@ -155,35 +155,35 @@ void xe_irq_enable_hwe(struct xe_gt *gt) if (!xe_gt_is_media_type(gt)) { /* Enable interrupts for each engine class */ - xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask); + xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); if (ccs_mask) - xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask); + xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask); - xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask); + xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask); + xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask); if (bcs_mask & (BIT(1)|BIT(2))) - xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); if (bcs_mask & (BIT(3)|BIT(4))) - xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); if (bcs_mask & (BIT(5)|BIT(6))) - xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); if (bcs_mask & (BIT(7)|BIT(8))) - xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); if (ccs_mask & (BIT(0)|BIT(1))) - xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask); } if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { /* Enable interrupts for each engine class */ - xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask); + xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask); /* Unmask interrupts for each engine instance */ - xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask); - xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask); - xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask); + xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK,
~dmask); /* * the heci2 interrupt is enabled via the same register as the @@ -197,17 +197,17 @@ void xe_irq_enable_hwe(struct xe_gt *gt) } if (gsc_mask) { - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); - xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask); + xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); + xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask); } if (heci_mask) - xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); + xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); } } static u32 gt_engine_identity(struct xe_device *xe, - struct xe_gt *mmio, + struct xe_mmio *mmio, const unsigned int bank, const unsigned int bit) { @@ -279,7 +279,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile, return tile->media_gt; default: break; - }; + } fallthrough; default: return tile->primary_gt; @@ -291,7 +291,7 @@ static void gt_irq_handler(struct xe_tile *tile, u32 *identity) { struct xe_device *xe = tile_to_xe(tile); - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; unsigned int bank, bit; u16 instance, intr_vec; enum xe_engine_class class; @@ -376,7 +376,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg) static u32 dg1_intr_disable(struct xe_device *xe) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u32 val; /* First disable interrupts */ @@ -394,7 +394,7 @@ static u32 dg1_intr_disable(struct xe_device *xe) static void dg1_intr_enable(struct xe_device *xe, bool stall) { - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); if (stall) @@ -431,7 +431,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) } for_each_tile(tile, xe, id) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0) continue; @@ -474,7 +474,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) static void gt_irq_reset(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, XE_ENGINE_CLASS_COMPUTE); @@ -504,7 +504,7 @@ static void gt_irq_reset(struct xe_tile *tile) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); if ((tile->media_gt && xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) || @@ -547,7 +547,7 @@ static void dg1_irq_reset(struct xe_tile *tile) static void dg1_irq_reset_mstr(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0); } @@ -566,7 +566,7 @@ static void vf_irq_reset(struct xe_device *xe) for_each_tile(tile, xe, id) { if (xe_device_has_memirq(xe)) - xe_memirq_reset(&tile->sriov.vf.memirq); + xe_memirq_reset(&tile->memirq); else gt_irq_reset(tile); } @@ -609,7 +609,7 @@ static void vf_irq_postinstall(struct xe_device *xe) for_each_tile(tile, xe, id) if (xe_device_has_memirq(xe)) - xe_memirq_postinstall(&tile->sriov.vf.memirq); + xe_memirq_postinstall(&tile->memirq); if (GRAPHICS_VERx100(xe) < 1210) xelp_intr_enable(xe, true); @@ -652,7 +652,7 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg) spin_unlock(&xe->irq.lock); for_each_tile(tile, xe, id) - 
xe_memirq_handler(&tile->sriov.vf.memirq); + xe_memirq_handler(&tile->memirq); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 8999ac511555..a60ceae4c6dd 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -193,7 +193,7 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt) lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo)); lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K)); - xe_mmio_write32(tile->primary_gt, + xe_mmio_write32(&tile->mmio, GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG, LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K)); } diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index aec7db39c061..4f64c7f4e68d 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -38,24 +38,6 @@ #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K -struct xe_lrc_snapshot { - struct xe_bo *lrc_bo; - void *lrc_snapshot; - unsigned long lrc_size, lrc_offset; - - u32 context_desc; - u32 indirect_context_desc; - u32 head; - struct { - u32 internal; - u32 memory; - } tail; - u32 start_seqno; - u32 seqno; - u32 ctx_timestamp; - u32 ctx_job_timestamp; -}; - static struct xe_device * lrc_to_xe(struct xe_lrc *lrc) { @@ -599,10 +581,10 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe) static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) { - struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq; + struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq; struct xe_device *xe = gt_to_xe(hwe->gt); - if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe)) + if (!xe_device_uses_memirq(xe)) return; regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM | @@ -613,9 +595,9 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED; regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr; - regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq); + regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe); regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr; - regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq); + regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe); } static int lrc_ring_mi_mode(struct xe_hw_engine *hwe) diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index c24542e89318..40d8f6906d3e 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -17,9 +17,26 @@ enum xe_engine_class; struct xe_gt; struct xe_hw_engine; struct xe_lrc; -struct xe_lrc_snapshot; struct xe_vm; +struct xe_lrc_snapshot { + struct xe_bo *lrc_bo; + void *lrc_snapshot; + unsigned long lrc_size, lrc_offset; + + u32 context_desc; + u32 indirect_context_desc; + u32 head; + struct { + u32 internal; + u32 memory; + } tail; + u32 start_seqno; + u32 seqno; + u32 ctx_timestamp; + u32 ctx_job_timestamp; +}; + #define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4) struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c index 95b6e9d7b7db..f833da88150a 100644 --- a/drivers/gpu/drm/xe/xe_memirq.c +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -5,8 +5,8 @@ #include <drm/drm_managed.h> -#include "regs/xe_gt_regs.h" #include "regs/xe_guc_regs.h" +#include "regs/xe_irq_regs.h" #include "regs/xe_regs.h" #include "xe_assert.h" @@ -19,15 +19,25 @@ #include "xe_hw_engine.h" #include "xe_map.h" 
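A brief aside on the xe_lmtt.c hunk above: the value written to LMEM_CFG packs the page-directory offset into a register field. A minimal sketch of that computation, with field positions assumed purely for illustration (the real definitions live in the xe register headers):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/sizes.h>

    #define LMEM_EN       BIT(31)         /* assumed bit position, illustrative only */
    #define LMTT_DIR_PTR  GENMASK(30, 0)  /* assumed field width, illustrative only */

    /* offset is the VRAM address of the LMTT page directory; it must be
     * 64KiB-aligned because the register stores it in 64KiB units */
    static u32 lmtt_dir_ptr_value(u64 offset)
    {
            return LMEM_EN | FIELD_PREP(LMTT_DIR_PTR, (u32)(offset / SZ_64K));
    }
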
#include "xe_memirq.h" -#include "xe_sriov.h" -#include "xe_sriov_printk.h" #define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition) -#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg) +#define memirq_printk(m, _level, _fmt, ...) \ + drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \ + memirq_to_tile(m)->id, ##__VA_ARGS__) + +#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ +#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__) +#else +#define memirq_debug(...) +#endif + +#define memirq_err(m, _fmt, ...) memirq_printk(m, err, _fmt, ##__VA_ARGS__) +#define memirq_err_ratelimited(m, _fmt, ...) \ + memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__) static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq) { - return container_of(memirq, struct xe_tile, sriov.vf.memirq); + return container_of(memirq, struct xe_tile, memirq); } static struct xe_device *memirq_to_xe(struct xe_memirq *memirq) @@ -105,6 +115,44 @@ static const char *guc_name(struct xe_guc *guc) * | | * | | * +-----------+ + * + * + * MSI-X use case + * + * When using MSI-X, hw engines report interrupt status and source to engine + * instance 0. For this scenario, in order to differentiate between the + * engines, we need to pass different status/source pointers in the LRC. + * + * The requirements on those pointers are: + * - Interrupt status should be 4KiB aligned + * - Interrupt source should be 64 bytes aligned + * + * To accommodate this, we duplicate the memirq page layout above - + * allocating a page for each engine instance and pass this page in the LRC. + * Note that the same page can be reused for different engine types. + * For example, an LRC executing on CCS #x will have pointers to page #x, + * and an LRC executing on BCS #x will have the same pointers. + * + * :: + * + * 0x0000 +==============================+ <== page for instance 0 (BCS0, CCS0, etc.) + * | Interrupt Status Report Page | + * 0x0400 +==============================+ + * | Interrupt Source Report Page | + * 0x0440 +==============================+ + * | Interrupt Enable Mask | + * +==============================+ + * | Not used | + * 0x1000 +==============================+ <== page for instance 1 (BCS1, CCS1, etc.) + * | Interrupt Status Report Page | + * 0x1400 +==============================+ + * | Interrupt Source Report Page | + * 0x1440 +==============================+ + * | Not used | + * 0x2000 +==============================+ <== page for instance 2 (BCS2, CCS2, etc.) + * | ... | + * +==============================+ + * */ static void __release_xe_bo(struct drm_device *drm, void *arg) @@ -114,18 +162,30 @@ static void __release_xe_bo(struct drm_device *drm, void *arg) xe_bo_unpin_map_no_vm(bo); } +static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq) +{ + /* + * When the HW engines are configured to use MSI-X, + * they report interrupt status and source to the offset of + * engine instance 0. + */ + return xe_device_has_msix(memirq_to_xe(memirq)); +} + static int memirq_alloc_pages(struct xe_memirq *memirq) { struct xe_device *xe = memirq_to_xe(memirq); struct xe_tile *tile = memirq_to_tile(memirq); + size_t bo_size = hw_reports_to_instance_zero(memirq) ? 
+ XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K; struct xe_bo *bo; int err; - BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64)); - BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K)); + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64)); + BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K)); /* XXX: convert to managed bo */ - bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, + bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size, ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | @@ -140,25 +200,25 @@ static int memirq_alloc_pages(struct xe_memirq *memirq) memirq_assert(memirq, !xe_bo_is_vram(bo)); memirq_assert(memirq, !memirq->bo); - iosys_map_memset(&bo->vmap, 0, 0, SZ_4K); + iosys_map_memset(&bo->vmap, 0, 0, bo_size); memirq->bo = bo; - memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET); - memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET); + memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0)); + memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0)); memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET); memirq_assert(memirq, !memirq->source.is_iomem); memirq_assert(memirq, !memirq->status.is_iomem); memirq_assert(memirq, !memirq->mask.is_iomem); - memirq_debug(memirq, "page offsets: source %#x status %#x\n", - xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq)); + memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n", + xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0), + XE_MEMIRQ_STATUS_OFFSET(0)); return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo); out: - xe_sriov_err(memirq_to_xe(memirq), - "Failed to allocate memirq page (%pe)\n", ERR_PTR(err)); + memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err)); return err; } @@ -178,9 +238,7 @@ static void memirq_set_enable(struct xe_memirq *memirq, bool enable) * * These allocations are managed and will be implicitly released on unload. * - * Note: This function shall be called only by the VF driver. - * - * If this function fails then VF driver won't be able to operate correctly. + * If this function fails then the driver won't be able to operate correctly. * If `Memory Based Interrupts`_ are not used this function will return 0. * * Return: 0 on success or a negative error code on failure. @@ -190,9 +248,7 @@ int xe_memirq_init(struct xe_memirq *memirq) struct xe_device *xe = memirq_to_xe(memirq); int err; - memirq_assert(memirq, IS_SRIOV_VF(xe)); - - if (!xe_device_has_memirq(xe)) + if (!xe_device_uses_memirq(xe)) return 0; err = memirq_alloc_pages(memirq); @@ -205,55 +261,70 @@ int xe_memirq_init(struct xe_memirq *memirq) return 0; } +static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance) +{ + memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE); + memirq_assert(memirq, memirq->bo); + + instance = hw_reports_to_instance_zero(memirq) ? instance : 0; + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance); +} + /** * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_. * @memirq: the &xe_memirq to query + * @hwe: the hw engine for which we want the report page * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the `Interrupt Source Report Page`_. 
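The per-instance page arithmetic behind __memirq_source_page() and __memirq_status_page() can be condensed into a short sketch. Only the offset macros added in xe_memirq_types.h are assumed; the GGTT base address and the MSI-X query are placeholders:

    #include <linux/sizes.h>
    #include <linux/types.h>

    #define XE_MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0)
    #define XE_MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400)

    /* With MSI-X, each engine instance owns its own 4KiB page; without it,
     * all engines report to the instance-0 page, so the instance is folded */
    static u32 memirq_status_addr(u32 bo_ggtt_addr, u16 instance, bool has_msix)
    {
            u16 inst = has_msix ? instance : 0;

            return bo_ggtt_addr + XE_MEMIRQ_STATUS_OFFSET(inst);
    }
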
*/ -u32 xe_memirq_source_ptr(struct xe_memirq *memirq) +u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); + + return __memirq_source_page(memirq, hwe->instance); +} + +static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance) +{ + memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE); memirq_assert(memirq, memirq->bo); - return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET; + instance = hw_reports_to_instance_zero(memirq) ? instance : 0; + return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance); } /** * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_. * @memirq: the &xe_memirq to query + * @hwe: the hw engine for which we want the report page * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the `Interrupt Status Report Page`_. */ -u32 xe_memirq_status_ptr(struct xe_memirq *memirq) +u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); - memirq_assert(memirq, memirq->bo); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); - return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET; + return __memirq_status_page(memirq, hwe->instance); } /** * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask. * @memirq: the &xe_memirq to query * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: GGTT's offset of the Interrupt Enable Mask. */ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); memirq_assert(memirq, memirq->bo); return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET; @@ -267,7 +338,7 @@ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq) * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_ * to be used by the GuC when `Memory Based Interrupts`_ are required. * - * Shall be called only on VF driver when `Memory Based Interrupts`_ are used + * Shall be called when `Memory Based Interrupts`_ are used * and xe_memirq_init() didn't fail. * * Return: 0 on success or a negative error code on failure. 
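For the GuC report pages registered by xe_memirq_init_guc() in the following hunk, the addresses are always derived from the instance-0 pages. A sketch of the arithmetic, assuming `offset` is the GuC's byte offset within the interrupt source vector:

    /* one source byte per vector entry, but each source byte has a
     * 16-byte status record, hence the SZ_16 stride on the status side */
    static void guc_report_addrs(u32 source_page, u32 status_page, u32 offset,
                                 u32 *source, u32 *status)
    {
            *source = source_page + offset;
            *status = status_page + offset * 16;
    }
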
@@ -279,12 +350,10 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) u32 source, status; int err; - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); - memirq_assert(memirq, memirq->bo); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); - source = xe_memirq_source_ptr(memirq) + offset; - status = xe_memirq_status_ptr(memirq) + offset * SZ_16; + source = __memirq_source_page(memirq, 0) + offset; + status = __memirq_status_page(memirq, 0) + offset * SZ_16; err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY, source); @@ -299,9 +368,8 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc) return 0; failed: - xe_sriov_err(memirq_to_xe(memirq), - "Failed to setup report pages in %s (%pe)\n", - guc_name(guc), ERR_PTR(err)); + memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n", + guc_name(guc), ERR_PTR(err)); return err; } @@ -311,13 +379,12 @@ failed: * * This is part of the driver IRQ setup flow. * - * This function shall only be used by the VF driver on platforms that use + * This function shall only be used on platforms that use * `Memory Based Interrupts`_. */ void xe_memirq_reset(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); if (memirq->bo) memirq_set_enable(memirq, false); @@ -329,13 +396,12 @@ void xe_memirq_reset(struct xe_memirq *memirq) * * This is part of the driver IRQ setup flow. * - * This function shall only be used by the VF driver on platforms that use + * This function shall only be used on platforms that use * `Memory Based Interrupts`_. */ void xe_memirq_postinstall(struct xe_memirq *memirq) { - memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq))); - memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq))); + memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq))); if (memirq->bo) memirq_set_enable(memirq, true); @@ -349,9 +415,9 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector, value = iosys_map_rd(vector, offset, u8); if (value) { if (value != 0xff) - xe_sriov_err_ratelimited(memirq_to_xe(memirq), - "Unexpected memirq value %#x from %s at %u\n", - value, name, offset); + memirq_err_ratelimited(memirq, + "Unexpected memirq value %#x from %s at %u\n", + value, name, offset); iosys_map_wr(vector, offset, u8, 0x00); } @@ -379,6 +445,28 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat } /** + * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine. + * @memirq: the &xe_memirq + * @hwe: the hw engine to process + * + * This function reads and dispatches `Memory Based Interrupts` for the provided HW engine. + */ +void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe) +{ + u16 offset = hwe->irq_offset; + u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0; + struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap, + XE_MEMIRQ_SOURCE_OFFSET(instance)); + + if (memirq_received(memirq, &src_offset, offset, "SRC")) { + struct iosys_map status_offset = + IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap, + XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16); + memirq_dispatch_engine(memirq, &status_offset, hwe); + } +} + +/** * xe_memirq_handler - The `Memory Based Interrupts`_ Handler. 
* @memirq: the &xe_memirq * @@ -405,13 +493,8 @@ void xe_memirq_handler(struct xe_memirq *memirq) if (gt->tile != tile) continue; - for_each_hw_engine(hwe, gt, id) { - if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) { - map = IOSYS_MAP_INIT_OFFSET(&memirq->status, - hwe->irq_offset * SZ_16); - memirq_dispatch_engine(memirq, &map, hwe); - } - } + for_each_hw_engine(hwe, gt, id) + xe_memirq_hwe_handler(memirq, hwe); } /* GuC and media GuC (if present) must be checked separately */ diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h index 2d40d03c3095..06130650e9d6 100644 --- a/drivers/gpu/drm/xe/xe_memirq.h +++ b/drivers/gpu/drm/xe/xe_memirq.h @@ -9,16 +9,18 @@ #include <linux/types.h> struct xe_guc; +struct xe_hw_engine; struct xe_memirq; int xe_memirq_init(struct xe_memirq *memirq); -u32 xe_memirq_source_ptr(struct xe_memirq *memirq); -u32 xe_memirq_status_ptr(struct xe_memirq *memirq); +u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe); +u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe); u32 xe_memirq_enable_ptr(struct xe_memirq *memirq); void xe_memirq_reset(struct xe_memirq *memirq); void xe_memirq_postinstall(struct xe_memirq *memirq); +void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe); void xe_memirq_handler(struct xe_memirq *memirq); int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc); diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h index 625b6b8736cc..9d0f6c1cdb9d 100644 --- a/drivers/gpu/drm/xe/xe_memirq_types.h +++ b/drivers/gpu/drm/xe/xe_memirq_types.h @@ -11,9 +11,9 @@ struct xe_bo; /* ISR */ -#define XE_MEMIRQ_STATUS_OFFSET 0x0 +#define XE_MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0) /* IIR */ -#define XE_MEMIRQ_SOURCE_OFFSET 0x400 +#define XE_MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400) /* IMR */ #define XE_MEMIRQ_ENABLE_OFFSET 0x440 diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 3fd462fda625..a48f239cad1c 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -36,13 +36,19 @@ static void tiles_fini(void *arg) /* * On multi-tile devices, partition the BAR space for MMIO on each tile, * possibly accounting for register override on the number of tiles available. + * tile_mmio_size contains both the tile's 4MB register space, as well as + * additional space for the GTT and other (possibly unused) regions). * Resulting memory layout is like below: * * .----------------------. <- tile_count * tile_mmio_size * | .... 
| * |----------------------| <- 2 * tile_mmio_size + * | tile1 GTT + other | + * |----------------------| <- 1 * tile_mmio_size + 4MB * | tile1->mmio.regs | * |----------------------| <- 1 * tile_mmio_size + * | tile0 GTT + other | + * |----------------------| <- 4MB * | tile0->mmio.regs | * '----------------------' <- 0MB */ @@ -61,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) /* Possibly override number of tile based on configuration register */ if (!xe->info.skip_mtcfg) { - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); u8 tile_count; u32 mtcfg; /* * Although the per-tile mmio regs are not yet initialized, this - * is fine as it's going to the root gt, that's guaranteed to be - * initialized earlier in xe_mmio_init() + * is fine as it's going to the root tile's mmio, that's + * guaranteed to be initialized earlier in xe_mmio_init() */ - mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR); + mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR); tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; if (tile_count < xe->info.tile_count) { @@ -90,8 +96,9 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) regs = xe->mmio.regs; for_each_tile(tile, xe, id) { - tile->mmio.size = tile_mmio_size; + tile->mmio.regs_size = SZ_4M; tile->mmio.regs = regs; + tile->mmio.tile = tile; regs += tile_mmio_size; } } @@ -126,8 +133,9 @@ static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size, regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count; for_each_tile(tile, xe, id) { - tile->mmio_ext.size = tile_mmio_ext_size; + tile->mmio_ext.regs_size = tile_mmio_ext_size; tile->mmio_ext.regs = regs; + tile->mmio_ext.tile = tile; regs += tile_mmio_ext_size; } } @@ -157,137 +165,132 @@ int xe_mmio_init(struct xe_device *xe) { struct xe_tile *root_tile = xe_device_get_root_tile(xe); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - const int mmio_bar = 0; /* * Map the entire BAR. * The first 16MB of the BAR, belong to the root tile, and include: * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB). */ - xe->mmio.size = pci_resource_len(pdev, mmio_bar); - xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR); + xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR); + xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0); if (xe->mmio.regs == NULL) { drm_err(&xe->drm, "failed to map registers\n"); return -EIO; } /* Setup first tile; other tiles (if present) will be setup later. 
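The tile layout described in the comment above reduces to one multiplication per tile. A sketch, assuming tile_mmio_size is the per-tile stride handed to mmio_multi_tile_setup() and that only the first 4MiB of each stride is register space (mmio.regs_size):

    #include <linux/types.h>

    /* hypothetical helper mirroring the `regs += tile_mmio_size` loop */
    static void __iomem *tile_reg_base(void __iomem *bar0, unsigned int tile_id,
                                       size_t tile_mmio_size)
    {
            /* tile N's registers start N strides into the BAR; the rest of
             * each stride holds that tile's GGTT and other regions */
            return bar0 + tile_id * tile_mmio_size;
    }
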
*/ - root_tile->mmio.size = SZ_16M; + root_tile->mmio.regs_size = SZ_4M; root_tile->mmio.regs = xe->mmio.regs; + root_tile->mmio.tile = root_tile; return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); } -static void mmio_flush_pending_writes(struct xe_gt *gt) +static void mmio_flush_pending_writes(struct xe_mmio *mmio) { #define DUMMY_REG_OFFSET 0x130030 - struct xe_tile *tile = gt_to_tile(gt); int i; - if (tile->xe->info.platform != XE_LUNARLAKE) + if (mmio->tile->xe->info.platform != XE_LUNARLAKE) return; /* 4 dummy writes */ for (i = 0; i < 4; i++) - writel(0, tile->mmio.regs + DUMMY_REG_OFFSET); + writel(0, mmio->regs + DUMMY_REG_OFFSET); } -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg) +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u8 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + val = readb(mmio->regs + addr); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg) +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u16 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + val = readw(mmio->regs + addr); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val) +void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); - trace_xe_reg_rw(gt, true, addr, val, sizeof(val)); + trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) - xe_gt_sriov_vf_write32(gt, reg, val); + if (!reg.vf && mmio->sriov_vf_gt) + xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); else - writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr); + writel(val, mmio->regs + addr); } -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg) +u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) { - struct xe_tile *tile = gt_to_tile(gt); - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u32 val; /* Wa_15015404425 */ - mmio_flush_pending_writes(gt); + mmio_flush_pending_writes(mmio); - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt))) - val = xe_gt_sriov_vf_read32(gt, reg); + if (!reg.vf && mmio->sriov_vf_gt) + val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); else - val = readl((reg.ext ? 
tile->mmio_ext.regs : tile->mmio.regs) + addr); + val = readl(mmio->regs + addr); - trace_xe_reg_rw(gt, false, addr, val, sizeof(val)); + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); return val; } -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set) +u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set) { u32 old, reg_val; - old = xe_mmio_read32(gt, reg); + old = xe_mmio_read32(mmio, reg); reg_val = (old & ~clr) | set; - xe_mmio_write32(gt, reg, reg_val); + xe_mmio_write32(mmio, reg, reg_val); return old; } -int xe_mmio_write32_and_verify(struct xe_gt *gt, +int xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval) { u32 reg_val; - xe_mmio_write32(gt, reg, val); - reg_val = xe_mmio_read32(gt, reg); + xe_mmio_write32(mmio, reg, val); + reg_val = xe_mmio_read32(mmio, reg); return (reg_val & mask) != eval ? -EINVAL : 0; } -bool xe_mmio_in_range(const struct xe_gt *gt, +bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg) { - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr); + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); return range && addr >= range->start && addr <= range->end; } /** * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * * Although Intel GPUs have some 64-bit registers, the hardware officially @@ -307,21 +310,21 @@ bool xe_mmio_in_range(const struct xe_gt *gt, * * Returns the value of the 64-bit register. */ -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) +u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) { struct xe_reg reg_udw = { .addr = reg.addr + 0x4 }; u32 ldw, udw, oldudw, retries; - reg.addr = xe_mmio_adjusted_addr(gt, reg.addr); - reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr); + reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); + reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr); /* we shouldn't adjust just one register address */ - xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4); + xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); - oldudw = xe_mmio_read32(gt, reg_udw); + oldudw = xe_mmio_read32(mmio, reg_udw); for (retries = 5; retries; --retries) { - ldw = xe_mmio_read32(gt, reg); - udw = xe_mmio_read32(gt, reg_udw); + ldw = xe_mmio_read32(mmio, reg); + udw = xe_mmio_read32(mmio, reg_udw); if (udw == oldudw) break; @@ -329,13 +332,13 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg) oldudw = udw; } - xe_gt_WARN(gt, retries == 0, - "64-bit read of %#x did not stabilize\n", reg.addr); + drm_WARN(&mmio->tile->xe->drm, retries == 0, + "64-bit read of %#x did not stabilize\n", reg.addr); return (u64)udw << 32 | ldw; } -static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic, bool expect_match) { ktime_t cur = ktime_get_raw(); @@ -346,7 +349,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v bool check; for (;;) { - read = xe_mmio_read32(gt, reg); + read = xe_mmio_read32(mmio, reg); check = (read & mask) == val; if (!expect_match) @@ -372,7 +375,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v } if (ret != 0) { - read = xe_mmio_read32(gt, reg); + read = xe_mmio_read32(mmio, reg); check = (read & mask) == val; if 
(!expect_match) @@ -390,7 +393,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v /** * xe_mmio_wait32() - Wait for a register to match the desired masked value - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * @mask: mask to be applied to the value read from the register * @val: desired value after applying the mask @@ -407,15 +410,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v * @timeout_us for different reasons, specially in non-atomic contexts. Thus, * it is possible that this function succeeds even after @timeout_us has passed. */ -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true); + return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true); } /** * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value - * @gt: MMIO target GT + * @mmio: MMIO target * @reg: register to read value from * @mask: mask to be applied to the value read from the register * @val: value not to be matched after applying the mask @@ -426,8 +429,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t * This function works exactly like xe_mmio_wait32() with the exception that * @val is expected not to be matched. */ -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, +int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, u32 *out_val, bool atomic) { - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false); + return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false); } diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h index 26551410ecc8..8a46f4006a84 100644 --- a/drivers/gpu/drm/xe/xe_mmio.h +++ b/drivers/gpu/drm/xe/xe_mmio.h @@ -14,25 +14,30 @@ struct xe_reg; int xe_mmio_init(struct xe_device *xe); int xe_mmio_probe_tiles(struct xe_device *xe); -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg); -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg); -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val); -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg); -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set); -int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval); -bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg); - -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg); -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic); -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, - u32 *out_val, bool atomic); - -static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr) +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg); +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg); +void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val); +u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg); +u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set); +int xe_mmio_write32_and_verify(struct xe_mmio 
*mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval); +bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg); + +u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg); +int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, + u32 timeout_us, u32 *out_val, bool atomic); +int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, + u32 val, u32 timeout_us, u32 *out_val, bool atomic); + +static inline u32 xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr) { - if (addr < gt->mmio.adj_limit) - addr += gt->mmio.adj_offset; + if (addr < mmio->adj_limit) + addr += mmio->adj_offset; return addr; } +static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe) +{ + return &xe->tiles[0].mmio; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 7ff0ac5b799a..54d199b5cfb2 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -278,7 +278,7 @@ static void xelp_lncf_dump(struct xe_mocs_info *info, struct xe_gt *gt, struct d if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", j++, @@ -310,7 +310,7 @@ static void xelp_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u ] (%#8x)\n", i, @@ -383,7 +383,7 @@ static void xehp_lncf_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n", j++, @@ -428,7 +428,7 @@ static void pvc_mocs_dump(struct xe_mocs_info *info, unsigned int flags, struct if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i)); drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n", j++, @@ -510,7 +510,7 @@ static void mtl_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u] (%#8x)\n", i, @@ -553,7 +553,7 @@ static void xe2_mocs_dump(struct xe_mocs_info *info, unsigned int flags, if (regs_are_mcr(gt)) reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); else - reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i)); drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u] (%#8x)\n", i, @@ -576,6 +576,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe, memset(info, 0, sizeof(struct xe_mocs_info)); switch (xe->info.platform) { + case XE_PANTHERLAKE: case XE_LUNARLAKE: case XE_BATTLEMAGE: info->ops = &xe2_mocs_ops; @@ -690,7 +691,7 @@ static void __init_mocs_table(struct xe_gt *gt, if (regs_are_mcr(gt)) xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs); else - 
xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs); + xe_mmio_write32(&gt->mmio, XELP_GLOBAL_MOCS(i), mocs); } } @@ -730,7 +731,7 @@ static void init_l3cc_table(struct xe_gt *gt, if (regs_are_mcr(gt)) xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc); else - xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc); + xe_mmio_write32(&gt->mmio, XELP_LNCFCMOCS(i), l3cc); } } @@ -773,25 +774,21 @@ void xe_mocs_init(struct xe_gt *gt) void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) { - struct xe_mocs_info table; - unsigned int flags; - u32 ret; struct xe_device *xe = gt_to_xe(gt); + struct xe_mocs_info table; + unsigned int fw_ref, flags; flags = get_mocs_settings(xe, &table); xe_pm_runtime_get_noresume(xe); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - - if (ret) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) goto err_fw; table.ops->dump(&table, flags, gt, p); - xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - + xe_force_wake_put(gt_to_fw(gt), fw_ref); err_fw: - xe_assert(xe, !ret); xe_pm_runtime_put(xe); } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 78823f53d290..8dd55798ab31 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -36,11 +36,22 @@ #include "xe_pm.h" #include "xe_sched_job.h" #include "xe_sriov.h" +#include "xe_sync.h" #define DEFAULT_POLL_FREQUENCY_HZ 200 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) #define XE_OA_UNIT_INVALID U32_MAX +enum xe_oa_submit_deps { + XE_OA_SUBMIT_NO_DEPS, + XE_OA_SUBMIT_ADD_DEPS, +}; + +enum xe_oa_user_extn_from { + XE_OA_USER_EXTN_FROM_OPEN, + XE_OA_USER_EXTN_FROM_CONFIG, +}; + struct xe_oa_reg { struct xe_reg addr; u32 value; @@ -70,6 +81,7 @@ struct flex { }; struct xe_oa_open_param { + struct xe_file *xef; u32 oa_unit_id; bool sample; u32 metric_set; @@ -81,6 +93,9 @@ struct xe_oa_open_param { struct xe_exec_queue *exec_q; struct xe_hw_engine *hwe; bool no_preempt; + struct drm_xe_sync __user *syncs_user; + int num_syncs; + struct xe_sync_entry *syncs; }; struct xe_oa_config_bo { @@ -90,6 +105,17 @@ struct xe_oa_config_bo { struct xe_bb *bb; }; +struct xe_oa_fence { + /* @base: dma fence base */ + struct dma_fence base; + /* @lock: lock for the fence */ + spinlock_t lock; + /* @work: work to signal @base */ + struct delayed_work work; + /* @cb: callback to schedule @work */ + struct dma_fence_cb cb; +}; + #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x static const struct xe_oa_format oa_formats[] = { @@ -162,10 +188,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se return oa_config; } -static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo) +static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence) { xe_oa_config_put(oa_bo->oa_config); - xe_bb_free(oa_bo->bb, NULL); + xe_bb_free(oa_bo->bb, last_fence); kfree(oa_bo); } @@ -176,7 +202,7 @@ static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream) static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream) { - return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) & + return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) & OAG_OATAILPTR_MASK; } @@ -366,7 +392,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - xe_mmio_write32(stream->gt, oaheadptr, + xe_mmio_write32(&stream->gt->mmio, oaheadptr, (head + gtt_offset) & OAG_OAHEADPTR_MASK); 
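On the OAHEADPTR update just above: head and tail are byte offsets into the OA buffer, while the hardware registers hold GGTT addresses, which is why gtt_offset is added back and the result masked on write. A sketch of the wraparound arithmetic, assuming the default power-of-two 16MiB buffer selected by OABUFFER_SIZE_16M (on Xe2+ with overrun mode the buffer is effectively a non-power-of-2 ring, see the comment in xe_oa_stream_init further below, so this shortcut would not apply there):

    #include <linux/types.h>

    #define OA_BUF_SIZE (16u << 20) /* 16 MiB, power of two */

    /* bytes of report data between the CPU's head and the HW tail */
    static u32 oa_bytes_available(u32 head, u32 tail)
    {
            return (tail - head) & (OA_BUF_SIZE - 1);
    }
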
stream->oa_buffer.head = head; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -377,22 +403,23 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT; unsigned long flags; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0); + xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr, gtt_offset & OAG_OAHEADPTR_MASK); stream->oa_buffer.head = 0; /* * PRM says: "This MMIO must be set before the OATAILPTR register and after the * OAHEADPTR register. This is to enable proper functionality of the overflow bit". */ - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf); + xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr, gtt_offset & OAG_OATAILPTR_MASK); /* Mark that we need updated tail pointer to read from */ @@ -444,21 +471,23 @@ static void xe_oa_enable(struct xe_oa_stream *stream) stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) val |= OAG_OACONTROL_OA_PES_DISAG_EN; - xe_mmio_write32(stream->gt, regs->oa_ctrl, val); + xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val); } static void xe_oa_disable(struct xe_oa_stream *stream) { - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0); - if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl, + struct xe_mmio *mmio = &stream->gt->mmio; + + xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0); + if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl, OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false)) drm_err(&stream->oa->xe->drm, "wait for OA to be disabled timed out\n"); if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) { /* <= XE_METEORLAKE except XE_PVC */ - xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1); - if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false)) + xe_mmio_write32(mmio, OA_TLB_INV_CR, 1); + if (xe_mmio_wait32(mmio, OA_TLB_INV_CR, 1, 0, 50000, NULL, false)) drm_err(&stream->oa->xe->drm, "wait for OA tlb invalidate timed out\n"); } @@ -481,7 +510,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf, size_t count, size_t *offset) { /* Only clear our bits to avoid side-effects */ - stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status, + stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status, OASTATUS_RELEVANT_BITS, 0); /* * Signal to userspace that there is non-zero OA status to read via @@ -567,11 +596,11 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait) return ret; } -static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) +static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps, + struct xe_bb *bb) { struct xe_sched_job *job; struct dma_fence *fence; - long timeout; int err = 0; /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */ @@ -581,18 +610,24 @@ static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) goto exit; } + if (deps == XE_OA_SUBMIT_ADD_DEPS) { + for (int i = 
0; i < stream->num_syncs && !err; i++) + err = xe_sync_entry_add_deps(&stream->syncs[i], job); + if (err) { + drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err); + goto err_put_job; + } + } + xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - timeout = dma_fence_wait_timeout(fence, false, HZ); - dma_fence_put(fence); - if (timeout < 0) - err = timeout; - else if (!timeout) - err = -ETIME; + return fence; +err_put_job: + xe_sched_job_put(job); exit: - return err; + return ERR_PTR(err); } static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs) @@ -636,7 +671,8 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream) xe_oa_config_put(stream->oa_config); llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) - free_oa_config_bo(oa_bo); + free_oa_config_bo(oa_bo, stream->last_fence); + dma_fence_put(stream->last_fence); } static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, @@ -656,6 +692,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc, const struct flex *flex, u32 count) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -667,7 +704,16 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr xe_oa_store_flex(stream, lrc, bb, flex, count); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -675,6 +721,7 @@ exit: static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri) { + struct dma_fence *fence; struct xe_bb *bb; int err; @@ -686,7 +733,16 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re write_cs_mi_lri(bb, reg_lri, 1); - err = xe_oa_submit_bb(stream, bb); + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; + } + xe_bb_free(bb, fence); + dma_fence_put(fence); + + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -749,7 +805,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) int err; /* Set ccs select to enable programming of OAC_OACONTROL */ - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream)); + xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl, + __oa_ccs_select(stream)); /* Modify stream hwe context image with regs_context */ err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], @@ -785,6 +842,7 @@ static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool en static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 sqcnt1; /* @@ -798,7 +856,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) _MASKED_BIT_DISABLE(DISABLE_DOP_GATING)); } - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, oag_configure_mmio_trigger(stream, false)); /* disable the context save/restore or OAR counters */ @@ -806,13 +864,13 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream) xe_oa_configure_oa_context(stream, false); /* Make sure we disable noa to save power. 
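The xe_oa_submit_bb() rework visible in these hunks changes batch-buffer lifetime handling: instead of waiting synchronously for the job, callers now hand the returned fence to xe_bb_free(), which keeps the BB's backing store alive until the batch has executed. The calling pattern, sketched with error handling elided (names as in the hunks above):

    fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
    if (IS_ERR(fence))
            return PTR_ERR(fence);
    xe_bb_free(bb, fence);  /* free is deferred until the fence signals */
    dma_fence_put(fence);   /* drop the submission reference */
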
*/ - xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0); + xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0); sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); /* Reset PMON Enable to save power. */ - xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0); + xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0); } static void xe_oa_stream_destroy(struct xe_oa_stream *stream) @@ -832,7 +890,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_oa_free_oa_buffer(stream); - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(stream->oa->xe); /* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */ @@ -840,6 +898,7 @@ xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); xe_oa_free_configs(stream); + xe_file_put(stream->xef); } static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream) @@ -910,11 +969,62 @@ out: return oa_bo; } +static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence) +{ + dma_fence_put(stream->last_fence); + stream->last_fence = dma_fence_get(fence); +} + +static void xe_oa_fence_work_fn(struct work_struct *w) +{ + struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work); + + /* Signal fence to indicate new OA configuration is active */ + dma_fence_signal(&ofence->base); + dma_fence_put(&ofence->base); +} + +static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + /* Additional empirical delay needed for NOA programming after registers are written */ +#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 + + struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb); + + INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn); + queue_delayed_work(system_unbound_wq, &ofence->work, + usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US)); + dma_fence_put(fence); +} + +static const char *xe_oa_get_driver_name(struct dma_fence *fence) +{ + return "xe_oa"; +} + +static const char *xe_oa_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops xe_oa_fence_ops = { + .get_driver_name = xe_oa_get_driver_name, + .get_timeline_name = xe_oa_get_timeline_name, +}; + static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config) { #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 struct xe_oa_config_bo *oa_bo; - int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US; + struct xe_oa_fence *ofence; + int i, err, num_signal = 0; + struct dma_fence *fence; + + ofence = kzalloc(sizeof(*ofence), GFP_KERNEL); + if (!ofence) { + err = -ENOMEM; + goto exit; + } oa_bo = xe_oa_alloc_config_buffer(stream, config); if (IS_ERR(oa_bo)) { @@ -922,11 +1032,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config goto exit; } - err = xe_oa_submit_bb(stream, oa_bo->bb); + /* Emit OA configuration batch */ + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto exit; + } - /* Additional empirical delay needed for NOA programming after registers are written */ - usleep_range(us, 2 * us); + /* Point of no return: initialize and set fence to signal */ + spin_lock_init(&ofence->lock); + dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0); + + for (i = 0; i < stream->num_syncs; i++) { + if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL) + num_signal++; + 
xe_sync_entry_signal(&stream->syncs[i], &ofence->base); + } + + /* Additional dma_fence_get in case we dma_fence_wait */ + if (!num_signal) + dma_fence_get(&ofence->base); + + /* Update last fence too before adding callback */ + xe_oa_update_last_fence(stream, fence); + + /* Add job fence callback to schedule work to signal ofence->base */ + err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb); + xe_gt_assert(stream->gt, !err || err == -ENOENT); + if (err == -ENOENT) + xe_oa_config_cb(fence, &ofence->cb); + + /* If nothing needs to be signaled we wait synchronously */ + if (!num_signal) { + dma_fence_wait(&ofence->base, false); + dma_fence_put(&ofence->base); + } + + /* Done with syncs */ + for (i = 0; i < stream->num_syncs; i++) + xe_sync_entry_cleanup(&stream->syncs[i]); + kfree(stream->syncs); + + return 0; exit: + kfree(ofence); return err; } @@ -940,6 +1089,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream) static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) { + struct xe_mmio *mmio = &stream->gt->mmio; u32 oa_debug, sqcnt1; int ret; @@ -966,12 +1116,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL | OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL; - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug, + xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, _MASKED_BIT_ENABLE(oa_debug) | oag_report_ctx_switches(stream) | oag_configure_mmio_trigger(stream, true)); - xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? + xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? (OAG_OAGLBCTXCTRL_COUNTER_RESUME | OAG_OAGLBCTXCTRL_TIMER_ENABLE | REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK, @@ -985,7 +1135,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) sqcnt1 = SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(stream->oa->xe) ? 
SQCNT1_OABPC : 0); - xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1); + xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1); /* Configure OAR/OAC */ if (stream->exec_q) { @@ -997,6 +1147,262 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) return xe_oa_emit_oa_config(stream, stream->oa_config); } +static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) +{ + u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); + u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); + u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); + u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); + int idx; + + for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { + const struct xe_oa_format *f = &oa->oa_formats[idx]; + + if (counter_size == f->counter_size && bc_report == f->bc_report && + type == f->type && counter_sel == f->counter_select) { + *name = idx; + return 0; + } + } + + return -EINVAL; +} + +static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + if (value >= oa->oa_unit_ids) { + drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); + return -EINVAL; + } + param->oa_unit_id = value; + return 0; +} + +static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->sample = value; + return 0; +} + +static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->metric_set = value; + return 0; +} + +static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + int ret = decode_oa_format(oa, value, &param->oa_format); + + if (ret) { + drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); + return ret; + } + return 0; +} + +static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ +#define OA_EXPONENT_MAX 31 + + if (value > OA_EXPONENT_MAX) { + drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); + return -EINVAL; + } + param->period_exponent = value; + return 0; +} + +static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->disabled = value; + return 0; +} + +static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->exec_queue_id = value; + return 0; +} + +static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->engine_instance = value; + return 0; +} + +static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->no_preempt = value; + return 0; +} + +static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->num_syncs = value; + return 0; +} + +static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->syncs_user = u64_to_user_ptr(value); + return 0; +} + +static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + return -EINVAL; +} + +typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param); +static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = 
xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_ext_set_property ext; + int err; + u32 idx; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) != + ARRAY_SIZE(xe_oa_set_property_funcs_config)); + + if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || + XE_IOCTL_DBG(oa->xe, ext.pad)) + return -EINVAL; + + idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); + + if (from == XE_OA_USER_EXTN_FROM_CONFIG) + return xe_oa_set_property_funcs_config[idx](oa, ext.value, param); + else + return xe_oa_set_property_funcs_open[idx](oa, ext.value, param); +} + +typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param); +static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { + [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, +}; + +#define MAX_USER_EXTENSIONS 16 +static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension, + int ext_number, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_user_extension ext; + int err; + u32 idx; + + if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) + return -E2BIG; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + if (XE_IOCTL_DBG(oa->xe, ext.pad) || + XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) + return -EINVAL; + + idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); + err = xe_oa_user_extension_funcs[idx](oa, from, extension, param); + if (XE_IOCTL_DBG(oa->xe, err)) + return err; + + if (ext.next_extension) + return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param); + + return 0; +} + +static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) +{ + int ret, 
num_syncs, num_ufence = 0; + + if (param->num_syncs && !param->syncs_user) { + drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n"); + ret = -EINVAL; + goto exit; + } + + if (param->num_syncs) { + param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL); + if (!param->syncs) { + ret = -ENOMEM; + goto exit; + } + } + + for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { + ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs], + &param->syncs_user[num_syncs], 0); + if (ret) + goto err_syncs; + + if (xe_sync_is_ufence(&param->syncs[num_syncs])) + num_ufence++; + } + + if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) { + ret = -EINVAL; + goto err_syncs; + } + + return 0; + +err_syncs: + while (num_syncs--) + xe_sync_entry_cleanup(&param->syncs[num_syncs]); + kfree(param->syncs); +exit: + return ret; +} + static void xe_oa_stream_enable(struct xe_oa_stream *stream) { stream->pollin = false; @@ -1090,36 +1496,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream) static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) { - struct drm_xe_ext_set_property ext; + struct xe_oa_open_param param = {}; long ret = stream->oa_config->id; struct xe_oa_config *config; int err; - err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext)); - if (XE_IOCTL_DBG(stream->oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) || - XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET)) - return -EINVAL; + err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param); + if (err) + return err; - config = xe_oa_get_oa_config(stream->oa, ext.value); + config = xe_oa_get_oa_config(stream->oa, param.metric_set); if (!config) return -ENODEV; - if (config != stream->oa_config) { - err = xe_oa_emit_oa_config(stream, config); - if (!err) - config = xchg(&stream->oa_config, config); - else - ret = err; + param.xef = stream->xef; + err = xe_oa_parse_syncs(stream->oa, &param); + if (err) + goto err_config_put; + + stream->num_syncs = param.num_syncs; + stream->syncs = param.syncs; + + err = xe_oa_emit_oa_config(stream, config); + if (!err) { + config = xchg(&stream->oa_config, config); + drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n", + stream->oa_config->uuid); } +err_config_put: xe_oa_config_put(config); - return ret; + return err ?: ret; } static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) @@ -1349,6 +1757,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, { struct xe_oa_unit *u = param->hwe->oa_unit; struct xe_gt *gt = param->hwe->gt; + unsigned int fw_ref; int ret; stream->exec_q = param->exec_q; @@ -1362,6 +1771,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; + stream->xef = xe_file_get(param->xef); + stream->num_syncs = param->num_syncs; + stream->syncs = param->syncs; + /* * For Xe2+, when overrun mode is enabled, there are no partial reports at the end * of buffer, making the OA buffer effectively a non-power-of-2 size circular @@ -1409,7 +1822,11 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, /* Take runtime pm ref and forcewake to disable RC6 */ xe_pm_runtime_get(stream->oa->xe); - XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + fw_ref = 
xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + ret = -ETIMEDOUT; + goto err_fw_put; + } ret = xe_oa_alloc_oa_buffer(stream); if (ret) @@ -1451,13 +1868,14 @@ err_put_k_exec_q: err_free_oa_buf: xe_oa_free_oa_buffer(stream); err_fw_put: - XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); + xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(stream->oa->xe); if (stream->override_gucrc) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); err_free_configs: xe_oa_free_configs(stream); exit: + xe_file_put(stream->xef); return ret; } @@ -1535,7 +1953,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt) case XE_PVC: case XE_METEORLAKE: xe_pm_runtime_get(gt_to_xe(gt)); - reg = xe_mmio_read32(gt, RPM_CONFIG0); + reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); xe_pm_runtime_put(gt_to_xe(gt)); shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg); @@ -1567,27 +1985,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type) } } -static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) -{ - u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); - u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); - u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); - u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); - int idx; - - for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { - const struct xe_oa_format *f = &oa->oa_formats[idx]; - - if (counter_size == f->counter_size && bc_report == f->bc_report && - type == f->type && counter_sel == f->counter_select) { - *name = idx; - return 0; - } - } - - return -EINVAL; -} - /** * xe_oa_unit_id - Return OA unit ID for a hardware engine * @hwe: @xe_hw_engine @@ -1634,155 +2031,6 @@ out: return ret; } -static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - if (value >= oa->oa_unit_ids) { - drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); - return -EINVAL; - } - param->oa_unit_id = value; - return 0; -} - -static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->sample = value; - return 0; -} - -static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->metric_set = value; - return 0; -} - -static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - int ret = decode_oa_format(oa, value, &param->oa_format); - - if (ret) { - drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); - return ret; - } - return 0; -} - -static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ -#define OA_EXPONENT_MAX 31 - - if (value > OA_EXPONENT_MAX) { - drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); - return -EINVAL; - } - param->period_exponent = value; - return 0; -} - -static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->disabled = value; - return 0; -} - -static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->exec_queue_id = value; - return 0; -} - -static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->engine_instance = value; - return 0; -} - -static int xe_oa_set_no_preempt(struct xe_oa *oa, 
u64 value, - struct xe_oa_open_param *param) -{ - param->no_preempt = value; - return 0; -} - -typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param); -static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = { - [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, - [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, - [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, - [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, - [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, - [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, - [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, - [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, - [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, -}; - -static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_ext_set_property ext; - int err; - u32 idx; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) || - XE_IOCTL_DBG(oa->xe, ext.pad)) - return -EINVAL; - - idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs)); - return xe_oa_set_property_funcs[idx](oa, ext.value, param); -} - -typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param); -static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { - [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, -}; - -#define MAX_USER_EXTENSIONS 16 -static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_user_extension ext; - int err; - u32 idx; - - if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) - return -E2BIG; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.pad) || - XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) - return -EINVAL; - - idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); - err = xe_oa_user_extension_funcs[idx](oa, extension, param); - if (XE_IOCTL_DBG(oa->xe, err)) - return err; - - if (ext.next_extension) - return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param); - - return 0; -} - /** * xe_oa_stream_open_ioctl - Opens an OA stream * @dev: @drm_device @@ -1808,7 +2056,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f return -ENODEV; } - ret = xe_oa_user_extensions(oa, data, 0, &param); + param.xef = xef; + ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param); if (ret) return ret; @@ -1876,11 +2125,24 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz); } + ret = xe_oa_parse_syncs(oa, &param); + if (ret) + goto err_exec_q; + mutex_lock(&param.hwe->gt->oa.gt_lock); ret = xe_oa_stream_open_ioctl_locked(oa, &param); mutex_unlock(&param.hwe->gt->oa.gt_lock); + if (ret < 0) + goto err_sync_cleanup; + + return ret; + +err_sync_cleanup: + while (param.num_syncs--) + xe_sync_entry_cleanup(&param.syncs[param.num_syncs]); + kfree(param.syncs); err_exec_q: - if (ret < 
0 && param.exec_q) + if (param.exec_q) xe_exec_queue_put(param.exec_q); return ret; } @@ -2351,7 +2613,7 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt) } /* Ensure MMIO trigger remains disabled till there is a stream */ - xe_mmio_write32(gt, u->regs.oa_debug, + xe_mmio_write32(&gt->mmio, u->regs.oa_debug, oag_configure_mmio_trigger(NULL, false)); /* Set oa_unit_ids now to ensure ids remain contiguous */ diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 8862eca73fbe..fea9d981e414 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -238,5 +238,17 @@ struct xe_oa_stream { /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */ u32 no_preempt; + + /** @xef: xe_file with which the stream was opened */ + struct xe_file *xef; + + /** @last_fence: fence to use in stream destroy when needed */ + struct dma_fence *last_fence; + + /** @num_syncs: size of @syncs array */ + u32 num_syncs; + + /** @syncs: syncs to wait on and to signal */ + struct xe_sync_entry *syncs; }; #endif diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index f291a1730024..30fdbdb9341e 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -100,6 +100,10 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = { * Reserved entries should be programmed with the maximum caching, minimum * coherency (which matches an all-0's encoding), so we can just omit them * in the table. + * + * Note: There is an implicit assumption in the driver that compression and + * coh_1way+ are mutually exclusive. If this is ever not true then userptr + * and imported dma-buf from external device will have uncleared ccs state. */ #define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \ { \ @@ -109,7 +113,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = { REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \ REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \ REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \ - .coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \ + .coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? 
\ + XE_COH_AT_LEAST_1WAY : XE_COH_NONE \ } static const struct xe_pat_table_entry xe2_pat_table[] = { @@ -160,7 +165,7 @@ static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[ for (int i = 0; i < n_entries; i++) { struct xe_reg reg = XE_REG(_PAT_INDEX(i)); - xe_mmio_write32(gt, reg, table[i].value); + xe_mmio_write32(&gt->mmio, reg, table[i].value); } } @@ -177,25 +182,24 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta static void xelp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); for (i = 0; i < xe->pat.n_entries; i++) { - u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat); drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i, XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xelp_pat_ops = { @@ -206,11 +210,12 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -224,9 +229,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p) XELP_MEM_TYPE_STR_MAP[mem_type], pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehp_pat_ops = { @@ -237,11 +240,12 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -253,9 +257,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xehpc_pat_ops = { @@ -266,11 +268,12 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); @@ -278,7 +281,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) u32 pat; if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); @@ -287,9 +290,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), 
pat); } - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } /* @@ -316,27 +317,28 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry int n_entries) { program_pat(gt, table, n_entries); - xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value); + xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value); if (IS_DGFX(gt_to_xe(gt))) - xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value); + xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value); } static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - int i, err; + unsigned int fw_ref; u32 pat; + int i; - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - goto err_fw; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return; drm_printf(p, "PAT table:\n"); for (i = 0; i < xe->pat.n_entries; i++) { if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i))); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i))); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i))); @@ -355,7 +357,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) * PPGTT entries. */ if (xe_gt_is_media_type(gt)) - pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA)); + pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA)); else pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA)); @@ -369,9 +371,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p) REG_FIELD_GET(XE2_COH_MODE, pat), pat); - err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); -err_fw: - xe_assert(xe, !err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); } static const struct xe_pat_ops xe2_pat_ops = { @@ -382,7 +382,7 @@ void xe_pat_init_early(struct xe_device *xe) { - if (GRAPHICS_VER(xe) == 20) { + if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) { xe->pat.ops = &xe2_pat_ops; xe->pat.table = xe2_pat_table; diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 5e962e72c97e..e6640283893f 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -13,7 +13,7 @@ #include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> -#include <drm/intel/xe_pciids.h> +#include <drm/intel/pciids.h> #include "display/xe_display.h" #include "regs/xe_gt_regs.h" @@ -103,7 +103,6 @@ static const struct xe_graphics_desc graphics_xelpp = { #define XE_HP_FEATURES \ .has_range_tlb_invalidation = true, \ - .has_flat_ccs = true, \ .dma_mask_size = 46, \ .va_bits = 48, \ .vm_max_level = 3 @@ -120,6 +119,8 @@ static const struct xe_graphics_desc graphics_xehpg = { XE_HP_FEATURES, .vram_flags = XE_VRAM_FLAGS_NEED64K, + + .has_flat_ccs = 1, }; static const struct xe_graphics_desc graphics_xehpc = { @@ -145,7 +146,6 @@ static const struct xe_graphics_desc graphics_xehpc = { .has_asid = 1, .has_atomic_enable_pte_bit = 1, - .has_flat_ccs = 0, .has_usm = 1, }; @@ -156,7 +156,6 @@ static const struct xe_graphics_desc graphics_xelpg = { BIT(XE_HW_ENGINE_CCS0), XE_HP_FEATURES, - .has_flat_ccs = 0, }; #define XE2_GFX_FEATURES \ @@ -209,7 +208,7 @@ static const struct xe_media_desc media_xelpmp = { }; static const struct xe_media_desc media_xe2 = { - .name = "Xe2_LPM / Xe2_HPM", + .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM", .hw_engine_mask = GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) | @@ -234,7 +233,7 @@ static const struct xe_device_desc rkl_desc = { 
.require_force_probe = true, }; -static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 }; +static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 }; static const struct xe_device_desc adl_s_desc = { .graphics = &graphics_xelp, @@ -249,7 +248,7 @@ static const struct xe_device_desc adl_s_desc = { }, }; -static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 }; +static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 }; static const struct xe_device_desc adl_p_desc = { .graphics = &graphics_xelp, @@ -286,9 +285,9 @@ static const struct xe_device_desc dg1_desc = { .require_force_probe = true, }; -static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 }; -static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 }; -static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 }; +static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 }; +static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 }; +static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; #define DG2_FEATURES \ DGFX_FEATURES, \ @@ -347,6 +346,12 @@ static const struct xe_device_desc bmg_desc = { .has_heci_cscfi = 1, }; +static const struct xe_device_desc ptl_desc = { + PLATFORM(PANTHERLAKE), + .has_display = true, + .require_force_probe = true, +}; + #undef PLATFORM __diag_pop(); @@ -357,6 +362,8 @@ static const struct gmdid_map graphics_ip_map[] = { { 1274, &graphics_xelpg }, /* Xe_LPG+ */ { 2001, &graphics_xe2 }, { 2004, &graphics_xe2 }, + { 3000, &graphics_xe2 }, + { 3001, &graphics_xe2 }, }; /* Map of GMD_ID values to media IP */ @@ -364,13 +371,9 @@ static const struct gmdid_map media_ip_map[] = { { 1300, &media_xelpmp }, { 1301, &media_xe2 }, { 2000, &media_xe2 }, + { 3000, &media_xe2 }, }; -#define INTEL_VGA_DEVICE(id, info) { \ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \ - PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \ - (unsigned long) info } - /* * Make sure any device matches here are from most specific to most * general. For example, since the Quanta match is based on the subsystem @@ -378,25 +381,26 @@ static const struct gmdid_map media_ip_map[] = { * PCI ID matches, otherwise we'll use the wrong info struct above. 
*/ static const struct pci_device_id pciidlist[] = { - XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc), - XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc), - XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), - XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), - XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc), - XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), - XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), - XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc), - XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc), - XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc), - XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc), - XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), - XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), + INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc), + INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc), + INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), + INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc), + INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), + INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), + INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc), + INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc), + INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc), + INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc), + INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc), + INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), + INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), + INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc), { } }; MODULE_DEVICE_TABLE(pci, pciidlist); -#undef INTEL_VGA_DEVICE - /* is device_id present in comma separated list of ids */ static bool device_id_in_list(u16 device_id, const char *devices, bool negative) { @@ -467,13 +471,15 @@ enum xe_gmdid_type { static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) { - struct xe_gt *gt = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct xe_reg gmdid_reg = GMD_ID; u32 val; KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); if (IS_SRIOV_VF(xe)) { + struct xe_gt *gt = xe_root_mmio_gt(xe); + /* * To get the value of the GMDID register, VFs must obtain it * from the GuC using MMIO communication. @@ -509,14 +515,17 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, gt->info.type = XE_GT_TYPE_UNINITIALIZED; } else { /* - * We need to apply the GSI offset explicitly here as at this - * point the xe_gt is not fully uninitialized and only basic - * access to MMIO registers is possible. + * GMD_ID is a GT register, but at this point in the driver + * init we haven't fully initialized the GT yet so we need to + * read the register with the tile's MMIO accessor. That means + * we need to apply the GSI offset manually since it won't get + * automatically added as it would if we were using a GT mmio + * accessor. 
*/ if (type == GMDID_MEDIA) gmdid_reg.addr += MEDIA_GT_GSI_OFFSET; - val = xe_mmio_read32(gt, gmdid_reg); + val = xe_mmio_read32(mmio, gmdid_reg); } *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); @@ -678,7 +687,10 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit; if (xe->info.platform != XE_PVC) xe->info.has_device_atomics_on_smem = 1; + + /* Runtime detection may change this later */ xe->info.has_flat_ccs = graphics_desc->has_flat_ccs; + xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation; xe->info.has_usm = graphics_desc->has_usm; @@ -707,6 +719,7 @@ static int xe_info_init(struct xe_device *xe, gt->info.type = XE_GT_TYPE_MAIN; gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; gt->info.engine_mask = graphics_desc->hw_engine_mask; + if (MEDIA_VER(xe) < 13 && media_desc) gt->info.engine_mask |= media_desc->hw_engine_mask; @@ -725,8 +738,6 @@ static int xe_info_init(struct xe_device *xe, gt->info.type = XE_GT_TYPE_MEDIA; gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; gt->info.engine_mask = media_desc->hw_engine_mask; - gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET; - gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH; /* * FIXME: At the moment multi-tile and standalone media are @@ -757,6 +768,25 @@ static void xe_pci_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +/* + * Probe the PCI device, initialize various parts of the driver. + * + * Fault injection is used to test the error paths of some initialization + * functions called either directly from xe_pci_probe() or indirectly, for + * example through xe_device_probe(). Those functions use the kernel fault + * injection capabilities infrastructure, see + * Documentation/fault-injection/fault-injection.rst for details. The macro + * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution + * at runtime and use a provided return value. The first requirement for + * error injectable functions is proper handling of the error code by the + * caller for recovery, which is always the case here. The second + * requirement is that no state is changed before the first error return. + * It is not strictly fulfilled for all initialization functions using the + * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those + * error cases at probe time, the error code is simply propagated up by the + * caller. Therefore there is no consequence on those specific callers when + * function error injection skips the whole function. 
+ */ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct xe_device_desc *desc = (const void *)ent->driver_data; diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 7397d556996a..d95d9835de42 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -44,7 +44,7 @@ static int pcode_mailbox_status(struct xe_tile *tile) [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"}, }; - err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK; + err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK; if (err) { drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err, err_decode[err].str ?: "Unknown"); @@ -58,7 +58,7 @@ static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *d unsigned int timeout_ms, bool return_data, bool atomic) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; int err; if (tile_to_xe(tile)->info.skip_pcode) diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h index 79b7042c4534..d08574c4cdb8 100644 --- a/drivers/gpu/drm/xe/xe_platform_types.h +++ b/drivers/gpu/drm/xe/xe_platform_types.h @@ -23,6 +23,7 @@ enum xe_platform { XE_METEORLAKE, XE_LUNARLAKE, XE_BATTLEMAGE, + XE_PANTHERLAKE, }; enum xe_subplatform { diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 33eb039053e4..40f7c844ed44 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -5,6 +5,7 @@ #include "xe_pm.h" +#include <linux/fault-inject.h> #include <linux/pm_runtime.h> #include <drm/drm_managed.h> @@ -263,6 +264,7 @@ int xe_pm_init_early(struct xe_device *xe) return 0; } +ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ /** * xe_pm_init - Initialize Xe Power Management diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 848da8e68c7a..170ae72d1a7b 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -9,6 +9,7 @@ #include <linux/sched/clock.h> #include <drm/ttm/ttm_placement.h> +#include <generated/xe_wa_oob.h> #include <uapi/drm/xe_drm.h> #include "regs/xe_engine_regs.h" @@ -23,6 +24,7 @@ #include "xe_macros.h" #include "xe_mmio.h" #include "xe_ttm_vram_mgr.h" +#include "xe_wa.h" static const u16 xe_to_user_engine_class[] = { [XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER, @@ -83,24 +85,22 @@ static __ktime_func_t __clock_id_to_func(clockid_t clk_id) } static void -__read_timestamps(struct xe_gt *gt, - struct xe_reg lower_reg, - struct xe_reg upper_reg, - u64 *engine_ts, - u64 *cpu_ts, - u64 *cpu_delta, - __ktime_func_t cpu_clock) +hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts, + u64 *cpu_delta, __ktime_func_t cpu_clock) { + struct xe_mmio *mmio = &hwe->gt->mmio; u32 upper, lower, old_upper, loop = 0; + struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base), + lower_reg = RING_TIMESTAMP(hwe->mmio_base); - upper = xe_mmio_read32(gt, upper_reg); + upper = xe_mmio_read32(mmio, upper_reg); do { *cpu_delta = local_clock(); *cpu_ts = cpu_clock(); - lower = xe_mmio_read32(gt, lower_reg); + lower = xe_mmio_read32(mmio, lower_reg); *cpu_delta = local_clock() - *cpu_delta; old_upper = upper; - upper = xe_mmio_read32(gt, upper_reg); + upper = xe_mmio_read32(mmio, upper_reg); } while (upper != old_upper && loop++ < 2); *engine_ts = (u64)upper << 32 | lower; @@ -117,6 +117,7 @@ query_engine_cycles(struct xe_device *xe, __ktime_func_t cpu_clock; struct 
xe_hw_engine *hwe; struct xe_gt *gt; + unsigned int fw_ref; if (query->size == 0) { query->size = size; @@ -149,18 +150,16 @@ query_engine_cycles(struct xe_device *xe, if (!hwe) return -EINVAL; - if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); return -EIO; + } - __read_timestamps(gt, - RING_TIMESTAMP(hwe->mmio_base), - RING_TIMESTAMP_UDW(hwe->mmio_base), - &resp.engine_cycles, - &resp.cpu_timestamp, - &resp.cpu_delta, - cpu_clock); + hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp, + &resp.cpu_delta, cpu_clock); - xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_put(gt_to_fw(gt), fw_ref); if (GRAPHICS_VER(xe) >= 20) resp.width = 64; @@ -168,16 +167,10 @@ query_engine_cycles(struct xe_device *xe, resp.width = 36; /* Only write to the output fields of user query */ - if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp)) - return -EFAULT; - - if (put_user(resp.cpu_delta, &query_ptr->cpu_delta)) - return -EFAULT; - - if (put_user(resp.engine_cycles, &query_ptr->engine_cycles)) - return -EFAULT; - - if (put_user(resp.width, &query_ptr->width)) + if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) || + put_user(resp.cpu_delta, &query_ptr->cpu_delta) || + put_user(resp.engine_cycles, &query_ptr->engine_cycles) || + put_user(resp.width, &query_ptr->width)) return -EFAULT; return 0; @@ -458,12 +451,23 @@ static int query_hwconfig(struct xe_device *xe, static size_t calc_topo_query_size(struct xe_device *xe) { - return xe->info.gt_count * - (4 * sizeof(struct drm_xe_query_topology_mask) + - sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) + - sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) + - sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) + - sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss)); + struct xe_gt *gt; + size_t query_size = 0; + int id; + + for_each_gt(gt, xe, id) { + query_size += 3 * sizeof(struct drm_xe_query_topology_mask) + + sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) + + sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) + + sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss); + + /* L3bank mask may not be available for some GTs */ + if (!XE_WA(gt, no_media_l3)) + query_size += sizeof(struct drm_xe_query_topology_mask) + + sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask); + } + + return query_size; } static int copy_mask(void __user **ptr, @@ -516,11 +520,18 @@ static int query_gt_topology(struct xe_device *xe, if (err) return err; - topo.type = DRM_XE_TOPO_L3_BANK; - err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask, - sizeof(gt->fuse_topo.l3_bank_mask)); - if (err) - return err; + /* + * If the kernel doesn't have a way to obtain a correct L3bank + * mask, then it's better to omit L3 from the query rather than + * reporting bogus or zeroed information to userspace. + */ + if (!XE_WA(gt, no_media_l3)) { + topo.type = DRM_XE_TOPO_L3_BANK; + err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask, + sizeof(gt->fuse_topo.l3_bank_mask)); + if (err) + return err; + } topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ? 
DRM_XE_TOPO_SIMD16_EU_PER_DSS : @@ -659,7 +670,7 @@ static int query_oa_units(struct xe_device *xe, du->oa_unit_id = u->oa_unit_id; du->oa_unit_type = u->type; du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); - du->capabilities = DRM_XE_OA_CAPS_BASE; + du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS; j = 0; for_each_hw_engine(hwe, gt, hwe_id) { diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 440ac572f6e5..e1a0e27cda14 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -15,6 +15,7 @@ #include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" +#include "xe_device.h" #include "xe_device_types.h" #include "xe_force_wake.h" #include "xe_gt.h" @@ -164,7 +165,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) else if (entry->clr_bits + 1) val = (reg.mcr ? xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(gt, reg)) & (~entry->clr_bits); + xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits); else val = 0; @@ -180,34 +181,34 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) if (entry->reg.mcr) xe_gt_mcr_multicast_write(gt, reg_mcr, val); else - xe_mmio_write32(gt, reg, val); + xe_mmio_write32(&gt->mmio, reg, val); } void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) { struct xe_reg_sr_entry *entry; unsigned long reg; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name); - err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; xa_for_each(&sr->xa, reg, entry) apply_one_mmio(gt, entry); - err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: - xe_gt_err(gt, "Failed to apply, err=%d\n", err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n"); } void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) @@ -220,15 +221,15 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) u32 mmio_base = hwe->mmio_base; unsigned long reg; unsigned int slot = 0; - int err; + unsigned int fw_ref; if (xa_empty(&sr->xa)) return; drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name); - err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL); - if (err) + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) goto err_force_wake; p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL); @@ -241,7 +242,7 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) } xe_reg_whitelist_print_entry(&p, 0, reg, entry); - xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), + xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), reg | entry->set_bits); slot++; } @@ -250,16 +251,16 @@ void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe) for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) { u32 addr = RING_NOPID(mmio_base).addr; - xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr); + xe_mmio_write32(&gt->mmio, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr); } - err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL); - XE_WARN_ON(err); + xe_force_wake_put(gt_to_fw(gt), fw_ref); return; err_force_wake: + xe_force_wake_put(gt_to_fw(gt), fw_ref); + drm_err(&xe->drm, 
"Failed to apply, err=-ETIMEDOUT\n"); } /** diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 86c705d18c0d..b13d4d62f0b1 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -196,7 +196,7 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx, *gt = (*hwe)->gt; *xe = gt_to_xe(*gt); break; - }; + } } /** diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index fe2cb2a96f78..e055bed7ae55 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -53,7 +53,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 if (IS_ERR(bo)) { drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n", PTR_ERR(bo)); - return (struct xe_sa_manager *)bo; + return ERR_CAST(bo); } sa_manager->bo = bo; sa_manager->is_iomem = bo->vmap.is_iomem; diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index eeccc1c318ae..1905ca590965 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job) fence = &chain->base; } - job->fence = fence; + job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */ drm_sched_job_arm(&job->drm); } diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index 0d3f76fb05ce..f13f333f00be 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -40,7 +40,6 @@ struct xe_sched_job { * @fence: dma fence to indicate completion. 1 way relationship - job * can safely reference fence, fence cannot safely reference job. */ -#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS struct dma_fence *fence; /** @user_fence: write back value when BB is complete */ struct { @@ -63,7 +62,7 @@ struct xe_sched_job { struct xe_sched_job_snapshot { u16 batch_addr_len; - u64 batch_addr[]; + u64 batch_addr[] __counted_by(batch_addr_len); }; #endif diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c index 5a1d65e4f19f..ef10782af656 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> #include "regs/xe_regs.h" @@ -35,7 +37,7 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode) static bool test_is_vf(struct xe_device *xe) { - u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + u32 value = xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG); return value & VF_CAP; } @@ -119,6 +121,7 @@ int xe_sriov_init(struct xe_device *xe) return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe); } +ALLOW_ERROR_INJECTION(xe_sriov_init, ERRNO); /* See xe_pci_probe() */ /** * xe_sriov_print_info - Print basic SR-IOV information. 
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 2e72c06fd40d..a90480c6aecf 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -83,6 +83,8 @@ static void user_fence_worker(struct work_struct *w) XE_WARN_ON("Copy to user failed"); kthread_unuse_mm(ufence->mm); mmput(ufence->mm); + } else { + drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n"); } wake_up_all(&ufence->xe->ufence_wq); diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index dda5268507d8..07cf7cfe4abd 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/fault-inject.h> + #include <drm/drm_managed.h> #include "xe_device.h" @@ -129,6 +131,7 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id) return 0; } +ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */ static int tile_ttm_mgr_init(struct xe_tile *tile) { diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 8573d7a87d84..91130ad8999c 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -21,6 +21,7 @@ #include "xe_vm.h" #define __dev_name_xe(xe) dev_name((xe)->drm.dev) +#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile))) #define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt))) #define __dev_name_eq(q) __dev_name_gt((q)->gt) @@ -342,12 +343,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal, ); TRACE_EVENT(xe_reg_rw, - TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len), + TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len), - TP_ARGS(gt, write, reg, val, len), + TP_ARGS(mmio, write, reg, val, len), TP_STRUCT__entry( - __string(dev, __dev_name_gt(gt)) + __string(dev, __dev_name_tile(mmio->tile)) __field(u64, val) __field(u32, reg) __field(u16, write) diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index 9b1a1d4304ae..30a3cfbaaa09 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(xe_vm, ), TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev), - __entry->vm, __entry->asid) + __entry->vm, __entry->asid) ); DEFINE_EVENT(xe_vm, xe_vm_kill, diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index f7113cf6109d..423856cc18d4 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -60,7 +60,7 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe) static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) { struct xe_tile *tile = xe_device_get_root_tile(xe); - struct xe_gt *mmio = xe_root_mmio_gt(xe); + struct xe_mmio *mmio = xe_root_tile_mmio(xe); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); u64 stolen_size; u64 tile_offset; @@ -94,7 +94,7 @@ static u32 get_wopcm_size(struct xe_device *xe) u32 wopcm_size; u64 val; - val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED); + val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED); val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val); switch (val) { @@ -119,7 +119,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr u32 stolen_size, wopcm_size; u32 ggc, gms; - ggc = xe_mmio_read32(xe_root_mmio_gt(xe), GGC); + ggc = xe_mmio_read32(xe_root_tile_mmio(xe), GGC); /* * Check GGMS: it should be fixed 0x3 (8MB), which corresponds 
to the @@ -159,7 +159,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr stolen_size -= wopcm_size; if (media_gt && XE_WA(media_gt, 14019821291)) { - u64 gscpsmi_base = xe_mmio_read64_2x32(media_gt, GSCPSMI_BASE) + u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE) & ~GENMASK_ULL(5, 0); /* diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 0d5e04158917..d449de0fb6ec 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -33,7 +33,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, { XE_RTP_NAME("Tuning: L3 cache - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f))) }, @@ -43,7 +43,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { SET(CCCHKNREG1, L3CMPCTRL)) }, { XE_RTP_NAME("Tuning: Compression Overfetch - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX), SET(XE2LPM_CCCHKNREG1, L3CMPCTRL)) }, @@ -52,7 +52,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN)) }, { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"), @@ -61,7 +61,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { COMPMEMRD256BOVRFETCHEN)) }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"), - XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2, COMPMEMRD256BOVRFETCHEN)) }, @@ -71,7 +71,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) }, { XE_RTP_NAME("Tuning: Stateless compression control - media"), - XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)), + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) }, diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index d431d0031185..fb0eda3d5682 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/fault-inject.h> #include <linux/firmware.h> #include <drm/drm_managed.h> @@ -796,6 +797,7 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw) return err; } +ALLOW_ERROR_INJECTION(xe_uc_fw_init, ERRNO); /* See xe_pci_probe() */ static u32 uc_fw_ggtt_offset(struct xe_uc_fw *uc_fw) { @@ -806,6 +808,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) { struct xe_device *xe = uc_fw_to_xe(uc_fw); struct xe_gt *gt = uc_fw_to_gt(uc_fw); + struct xe_mmio *mmio = &gt->mmio; u64 src_offset; u32 dma_ctrl; int ret; @@ -814,34 +817,34 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) /* Set the source address for the uCode */ src_offset = uc_fw_ggtt_offset(uc_fw) + uc_fw->css_offset; - xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset)); 
- xe_mmio_write32(gt, DMA_ADDR_0_HIGH, + xe_mmio_write32(mmio, DMA_ADDR_0_LOW, lower_32_bits(src_offset)); + xe_mmio_write32(mmio, DMA_ADDR_0_HIGH, upper_32_bits(src_offset) | DMA_ADDRESS_SPACE_GGTT); /* Set the DMA destination */ - xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset); - xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + xe_mmio_write32(mmio, DMA_ADDR_1_LOW, offset); + xe_mmio_write32(mmio, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); /* * Set the transfer size. The header plus uCode will be copied to WOPCM * via DMA, excluding any other components */ - xe_mmio_write32(gt, DMA_COPY_SIZE, + xe_mmio_write32(mmio, DMA_COPY_SIZE, sizeof(struct uc_css_header) + uc_fw->ucode_size); /* Start the DMA */ - xe_mmio_write32(gt, DMA_CTRL, + xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_ENABLE(dma_flags | START_DMA)); /* Wait for DMA to finish */ - ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl, + ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl, false); if (ret) drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n", xe_uc_fw_type_repr(uc_fw->type), dma_ctrl); /* Disable the bits once DMA is over */ - xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); + xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); return ret; } diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 80ba2fc78837..b1f81dca610d 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -169,7 +169,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) u64 offset_hi, offset_lo; u32 nodes, num_enabled; - reg = xe_mmio_read32(gt, MIRROR_FUSE3); + reg = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3); nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg); num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */ @@ -185,7 +185,8 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */ /* We don't expect any holes */ - xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size), + xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(&gt_to_tile(gt)->mmio, GSMBASE) - + ccs_size), "Hole between CCS and GSM.\n"); } else { reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR); @@ -219,8 +220,8 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, { struct xe_device *xe = tile_to_xe(tile); struct xe_gt *gt = tile->primary_gt; + unsigned int fw_ref; u64 offset; - int err; u32 reg; if (IS_SRIOV_VF(xe)) { @@ -239,9 +240,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, return 0; } - err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (err) - return err; + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; /* actual size */ if (unlikely(xe->info.platform == XE_DG1)) { @@ -257,13 +258,15 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, if (xe->info.has_flat_ccs) { offset = get_flat_ccs_offset(gt, *tile_size); } else { - offset = xe_mmio_read64_2x32(gt, GSMBASE); + offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE); } /* remove the tile offset so we have just the available size */ *vram_size = offset - *tile_offset; - return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + + return 0; } static void vram_fini(void *arg) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 353936a0f877..02cf647f86d8 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ 
b/drivers/gpu/drm/xe/xe_wa.c @@ -8,6 +8,7 @@ #include <drm/drm_managed.h> #include <kunit/visibility.h> #include <linux/compiler_types.h> +#include <linux/fault-inject.h> #include <generated/xe_wa_oob.h> @@ -251,6 +252,34 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), }, + /* Xe3_LPG */ + + { XE_RTP_NAME("14021871409"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)), + XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS)) + }, + + /* Xe3_LPM */ + + { XE_RTP_NAME("16021867713"), + XE_RTP_RULES(MEDIA_VERSION(3000), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + { XE_RTP_NAME("16021865536"), + XE_RTP_RULES(MEDIA_VERSION(3000), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + { XE_RTP_NAME("14021486841"), + XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + {} }; @@ -567,6 +596,18 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + /* Xe3_LPG */ + + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, + { XE_RTP_NAME("18034896535"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH)) + }, + {} }; @@ -742,6 +783,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, + /* Xe3_LPG */ + { XE_RTP_NAME("14021490052"), + XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0), + ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(FF_MODE, + DIS_MESH_PARTIAL_AUTOSTRIP | + DIS_MESH_AUTOSTRIP), + SET(VFLSKPD, + DIS_PARTIAL_AUTOSTRIP | + DIS_AUTOSTRIP)) + }, + {} }; @@ -854,6 +907,7 @@ int xe_wa_init(struct xe_gt *gt) return 0; } +ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) { @@ -891,11 +945,11 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) */ void xe_wa_apply_tile_workarounds(struct xe_tile *tile) { - struct xe_gt *mmio = tile->primary_gt; + struct xe_mmio *mmio = &tile->mmio; if (IS_SRIOV_VF(tile->xe)) return; - if (XE_WA(mmio, 22010954014)) + if (XE_WA(tile->primary_gt, 22010954014)) xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); } diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 920ca5060146..bcd04464b85e 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -33,7 +33,11 @@ GRAPHICS_VERSION(2004) 22019338487 MEDIA_VERSION(2000) GRAPHICS_VERSION(2001) + MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) 22019338487_display PLATFORM(LUNARLAKE) 16023588340 GRAPHICS_VERSION(2001) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) +no_media_l3 MEDIA_VERSION(3000) +14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0) + MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c index d3a99157e523..ada0d0aa6b74 100644 --- a/drivers/gpu/drm/xe/xe_wopcm.c +++ b/drivers/gpu/drm/xe/xe_wopcm.c @@ -5,6 +5,8 @@ #include 
"xe_wopcm.h" +#include <linux/fault-inject.h> + #include "regs/xe_guc_regs.h" #include "xe_device.h" #include "xe_force_wake.h" @@ -123,8 +125,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size, static bool __wopcm_regs_locked(struct xe_gt *gt, u32 *guc_wopcm_base, u32 *guc_wopcm_size) { - u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET); - u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE); + u32 reg_base = xe_mmio_read32(>->mmio, DMA_GUC_WOPCM_OFFSET); + u32 reg_size = xe_mmio_read32(>->mmio, GUC_WOPCM_SIZE); if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) || !(reg_base & GUC_WOPCM_OFFSET_VALID)) @@ -150,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt, XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK); mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; - err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask, + err = xe_mmio_write32_and_verify(>->mmio, GUC_WOPCM_SIZE, size, mask, size | GUC_WOPCM_SIZE_LOCKED); if (err) goto err_out; mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; - err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET, + err = xe_mmio_write32_and_verify(>->mmio, DMA_GUC_WOPCM_OFFSET, base | huc_agent, mask, base | huc_agent | GUC_WOPCM_OFFSET_VALID); @@ -169,10 +171,10 @@ err_out: drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n"); drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", DMA_GUC_WOPCM_OFFSET.addr, - xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET)); + xe_mmio_read32(>->mmio, DMA_GUC_WOPCM_OFFSET)); drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", GUC_WOPCM_SIZE.addr, - xe_mmio_read32(gt, GUC_WOPCM_SIZE)); + xe_mmio_read32(>->mmio, GUC_WOPCM_SIZE)); return err; } @@ -268,3 +270,4 @@ check: return ret; } +ALLOW_ERROR_INJECTION(xe_wopcm_init, ERRNO); /* See xe_pci_probe() */ diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index 626e5ac4c33d..4197f44e202f 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -6,6 +6,7 @@ config DRM_ZYNQMP_DPSUB depends on PHY_XILINX_ZYNQMP depends on XILINX_ZYNQMP_DPDMA select DMA_ENGINE + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index 9368acf56eaf..e4e0e299e8a7 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1200,6 +1200,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp, { unsigned int i; + if (!layer->info) + return; + for (i = 0; i < layer->info->num_channels; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 129beac4c073..25c5dc61ee88 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -18,7 +18,9 @@ #include <drm/drm_modes.h> #include <drm/drm_of.h> +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/io.h> @@ -51,6 +53,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_LANE_COUNT_SET 0x4 #define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8 #define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc +#define ZYNQMP_DP_LINK_QUAL_PATTERN_SET 0x10 #define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14 #define ZYNQMP_DP_DOWNSPREAD_CTL 0x18 #define ZYNQMP_DP_SOFTWARE_RESET 0x1c @@ -64,6 +67,9 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power 
on delay in msec (default: 4)"); ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \ ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \ ZYNQMP_DP_SOFTWARE_RESET_AUX) +#define ZYNQMP_DP_COMP_PATTERN_80BIT_1 0x20 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_2 0x24 +#define ZYNQMP_DP_COMP_PATTERN_80BIT_3 0x28 /* Core enable registers */ #define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80 @@ -207,6 +213,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3) #define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf +#define ZYNQMP_DP_TRANSMIT_PRBS7 0x230 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c #define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240 #define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244 @@ -275,30 +282,108 @@ struct zynqmp_dp_config { }; /** + * enum test_pattern - Test patterns for link testing + * @TEST_VIDEO: Use regular video input + * @TEST_SYMBOL_ERROR: Symbol error measurement pattern + * @TEST_PRBS7: Output of the PRBS7 (x^7 + x^6 + 1) polynomial + * @TEST_80BIT_CUSTOM: A custom 80-bit pattern + * @TEST_CP2520: HBR2 compliance eye pattern + * @TEST_TPS1: Link training symbol pattern TPS1 (/D10.2/) + * @TEST_TPS2: Link training symbol pattern TPS2 + * @TEST_TPS3: Link training symbol pattern TPS3 (for HBR2) + */ +enum test_pattern { + TEST_VIDEO, + TEST_TPS1, + TEST_TPS2, + TEST_TPS3, + TEST_SYMBOL_ERROR, + TEST_PRBS7, + TEST_80BIT_CUSTOM, + TEST_CP2520, +}; + +static const char *const test_pattern_str[] = { + [TEST_VIDEO] = "video", + [TEST_TPS1] = "tps1", + [TEST_TPS2] = "tps2", + [TEST_TPS3] = "tps3", + [TEST_SYMBOL_ERROR] = "symbol-error", + [TEST_PRBS7] = "prbs7", + [TEST_80BIT_CUSTOM] = "80bit-custom", + [TEST_CP2520] = "cp2520", +}; + +/** + * struct zynqmp_dp_test - Configuration for test mode + * @pattern: The test pattern + * @enhanced: Use enhanced framing + * @downspread: Use SSC + * @active: Whether test mode is active + * @custom: Custom pattern for %TEST_80BIT_CUSTOM + * @train_set: Voltage/preemphasis settings + * @bw_code: Bandwidth code for the link + * @link_cnt: Number of lanes + */ +struct zynqmp_dp_test { + enum test_pattern pattern; + bool enhanced, downspread, active; + u8 custom[10]; + u8 train_set[ZYNQMP_DP_MAX_LANES]; + u8 bw_code; + u8 link_cnt; +}; + +/** + * struct zynqmp_dp_train_set_priv - Private data for train_set debugfs files + * @dp: DisplayPort IP core structure + * @lane: The lane for this file + */ +struct zynqmp_dp_train_set_priv { + struct zynqmp_dp *dp; + int lane; +}; + +/** * struct zynqmp_dp - Xilinx DisplayPort core * @dev: device structure * @dpsub: Display subsystem * @iomem: device I/O memory for register access * @reset: reset controller + * @lock: Mutex protecting this struct and register access (but not AUX) * @irq: irq * @bridge: DRM bridge for the DP encoder * @next_bridge: The downstream bridge + * @test: Configuration for test mode * @config: IP core configuration from DTS * @aux: aux channel + * @aux_done: Completed when we get an AUX reply or timeout + * @ignore_aux_errors: If set, AUX errors are suppressed * @phy: PHY handles for DP lanes * @num_lanes: number of enabled phy lanes * @hpd_work: hot plug detection worker + * @hpd_irq_work: hot plug detection IRQ worker + * @ignore_hpd: If set, HPD events and IRQs are ignored * @status: connection status * @enabled: flag to indicate if the device is enabled * @dpcd: DP configuration data from currently connected sink device * @link_config: common link configuration between IP core and sink device * @mode: current
mode between IP core and sink device * @train_set: set of training data + * @debugfs_train_set: Debugfs private data for @train_set + * + * @lock covers the link configuration in this struct and the device's + * registers. It does not cover @aux or @ignore_aux_errors. It is not strictly + * required for any of the members which are only modified at probe/remove time + * (e.g. @dev). */ struct zynqmp_dp { struct drm_dp_aux aux; struct drm_bridge bridge; struct work_struct hpd_work; + struct work_struct hpd_irq_work; + struct completion aux_done; + struct mutex lock; struct drm_bridge *next_bridge; struct device *dev; @@ -310,9 +395,13 @@ struct zynqmp_dp { enum drm_connector_status status; int irq; bool enabled; + bool ignore_aux_errors; + bool ignore_hpd; + struct zynqmp_dp_train_set_priv debugfs_train_set[ZYNQMP_DP_MAX_LANES]; struct zynqmp_dp_mode mode; struct zynqmp_dp_link_config link_config; + struct zynqmp_dp_test test; struct zynqmp_dp_config config; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 train_set[ZYNQMP_DP_MAX_LANES]; @@ -626,6 +715,7 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, /** * zynqmp_dp_update_vs_emph - Update the training values * @dp: DisplayPort IP core structure + * @train_set: A set of training values * * Update the training values based on the request from sink. The mapped values * are predefined, and values(vs, pe, pc) are from the device manual. @@ -633,12 +723,12 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp, * Return: 0 if vs and emph are updated successfully, or the error code returned * by drm_dp_dpcd_write(). */ -static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) +static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp, u8 *train_set) { unsigned int i; int ret; - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set, + ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->mode.lane_cnt); if (ret < 0) return ret; @@ -646,7 +736,7 @@ static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp) for (i = 0; i < dp->mode.lane_cnt; i++) { u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4; union phy_configure_opts opts = { 0 }; - u8 train = dp->train_set[i]; + u8 train = train_set[i]; opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; @@ -690,7 +780,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp) * So, This loop should exit before 512 iterations */ for (max_tries = 0; max_tries < 512; max_tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -755,7 +845,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) return ret; for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) { - ret = zynqmp_dp_update_vs_emph(dp); + ret = zynqmp_dp_update_vs_emph(dp, dp->train_set); if (ret) return ret; @@ -778,28 +868,29 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp) } /** - * zynqmp_dp_train - Train the link + * zynqmp_dp_setup() - Set up major link parameters * @dp: DisplayPort IP core structure + * @bw_code: The link bandwidth as a multiple of 270 MHz + * @lane_cnt: The number of lanes to use + * @enhanced: Use enhanced framing + * @downspread: Enable spread-spectrum clocking * - * Return: 0 if all trains are done successfully, or corresponding error code. 
+ * Return: 0 on success, or -errno on failure */ -static int zynqmp_dp_train(struct zynqmp_dp *dp) +static int zynqmp_dp_setup(struct zynqmp_dp *dp, u8 bw_code, u8 lane_cnt, + bool enhanced, bool downspread) { u32 reg; - u8 bw_code = dp->mode.bw_code; - u8 lane_cnt = dp->mode.lane_cnt; u8 aux_lane_cnt = lane_cnt; - bool enhanced; int ret; zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt); - enhanced = drm_dp_enhanced_frame_cap(dp->dpcd); if (enhanced) { zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1); aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN; } - if (dp->dpcd[3] & 0x1) { + if (downspread) { zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1); drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); @@ -842,8 +933,24 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp) } zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg); - ret = zynqmp_dp_phy_ready(dp); - if (ret < 0) + return zynqmp_dp_phy_ready(dp); +} + +/** + * zynqmp_dp_train - Train the link + * @dp: DisplayPort IP core structure + * + * Return: 0 if all trains are done successfully, or corresponding error code. + */ +static int zynqmp_dp_train(struct zynqmp_dp *dp) +{ + int ret; + + ret = zynqmp_dp_setup(dp, dp->mode.bw_code, dp->mode.lane_cnt, + drm_dp_enhanced_frame_cap(dp->dpcd), + dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + if (ret) return ret; zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1); @@ -934,12 +1041,15 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, u8 *buf, u8 bytes, u8 *reply) { bool is_read = (cmd & AUX_READ_BIT) ? true : false; + unsigned long time_left; u32 reg, i; reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST) return -EBUSY; + reinit_completion(&dp->aux_done); + zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr); if (!is_read) for (i = 0; i < bytes; i++) @@ -954,17 +1064,14 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr, zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg); /* Wait for reply to be delivered upto 2ms */ - for (i = 0; ; i++) { - reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) - break; - - if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT || - i == 2) - return -ETIMEDOUT; + time_left = wait_for_completion_timeout(&dp->aux_done, + msecs_to_jiffies(2)); + if (!time_left) + return -ETIMEDOUT; - usleep_range(1000, 1100); - } + reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE); + if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + return -ETIMEDOUT; reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE); if (reply) @@ -1006,6 +1113,8 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (dp->status == connector_status_disconnected) { dev_dbg(dp->dev, "no connected aux device\n"); + if (dp->ignore_aux_errors) + goto fake_response; return -ENODEV; } @@ -1014,7 +1123,13 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret); - return ret; + if (!dp->ignore_aux_errors) + return ret; + +fake_response: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + memset(msg->buffer, 0, msg->size); + return msg->size; } /** @@ -1048,6 +1163,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) (w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) | (rate / (1000 * 1000))); + zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); + 
dp->aux.name = "ZynqMP DP AUX"; dp->aux.dev = dp->dev; dp->aux.drm_dev = dp->bridge.dev; @@ -1065,6 +1183,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp) static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp) { drm_dp_aux_unregister(&dp->aux); + + zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_REPLY_RECEIVED | + ZYNQMP_DP_INT_REPLY_TIMEOUT); } /* ----------------------------------------------------------------------------- @@ -1386,8 +1507,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, } /* Check with link rate and lane count */ + mutex_lock(&dp->lock); rate = zynqmp_dp_max_rate(dp->link_config.max_rate, dp->link_config.max_lanes, dp->config.bpp); + mutex_unlock(&dp->lock); if (mode->clock > rate) { dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n", mode->name); @@ -1414,6 +1537,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, pm_runtime_get_sync(dp->dev); + mutex_lock(&dp->lock); zynqmp_dp_disp_enable(dp, old_bridge_state); /* @@ -1474,6 +1598,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_SOFTWARE_RESET, ZYNQMP_DP_SOFTWARE_RESET_ALL); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1); + mutex_unlock(&dp->lock); } static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, @@ -1481,6 +1606,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct zynqmp_dp *dp = bridge_to_dp(bridge); + mutex_lock(&dp->lock); dp->enabled = false; cancel_work(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 0); @@ -1491,6 +1617,7 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge, zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0); zynqmp_dp_disp_disable(dp, old_bridge_state); + mutex_unlock(&dp->lock); pm_runtime_put_sync(dp->dev); } @@ -1526,13 +1653,14 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp) { - struct zynqmp_dp *dp = bridge_to_dp(bridge); struct zynqmp_dp_link_config *link_config = &dp->link_config; u32 state, i; int ret; + lockdep_assert_held(&dp->lock); + /* * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to * get the HPD signal with some monitors. 
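The AUX hunks above convert zynqmp_dp_aux_cmd_submit() from a 2 ms register-polling loop to a completion that the interrupt handler signals on reply or reply-timeout (the INT_EN/INT_DS writes in zynqmp_dp_aux_init()/zynqmp_dp_aux_cleanup() arm and disarm those interrupts). A minimal sketch of this IRQ-plus-completion handshake, using hypothetical names (my_dev, my_aux_submit) rather than the driver's actual symbols:

/* Sketch only: hypothetical device, not the zynqmp_dp API. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct my_dev {
	struct completion aux_done;
};

static void my_dev_init(struct my_dev *mydev)
{
	init_completion(&mydev->aux_done);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *mydev = data;

	/* Reply received or reply timeout: either way, wake the waiter. */
	complete(&mydev->aux_done);
	return IRQ_HANDLED;
}

static int my_aux_submit(struct my_dev *mydev)
{
	/*
	 * Re-arm before starting the transfer so a stale completion from
	 * a previous command cannot satisfy this wait.
	 */
	reinit_completion(&mydev->aux_done);

	/* ... program the AUX address/data/command registers here ... */

	if (!wait_for_completion_timeout(&mydev->aux_done,
					 msecs_to_jiffies(2)))
		return -ETIMEDOUT;

	/*
	 * ... re-read the status register to distinguish a reply from a
	 * reply-timeout, as the patch does ...
	 */
	return 0;
}

Re-arming the completion before kicking off the transfer, rather than after the wait, is what makes the pattern safe against a reply that arrives before the waiter sleeps.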
@@ -1568,6 +1696,18 @@ disconnected: return connector_status_disconnected; } +static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + enum drm_connector_status ret; + + mutex_lock(&dp->lock); + ret = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + + return ret; +} + static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { @@ -1605,6 +1745,582 @@ zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge, return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts); } +/* ----------------------------------------------------------------------------- + * debugfs + */ + +/** + * zynqmp_dp_set_test_pattern() - Configure the link for a test pattern + * @dp: DisplayPort IP core structure + * @pattern: The test pattern to configure + * @custom: The custom pattern to use if @pattern is %TEST_80BIT_CUSTOM + * + * Return: 0 on success, or negative errno on (DPCD) failure + */ +static int zynqmp_dp_set_test_pattern(struct zynqmp_dp *dp, + enum test_pattern pattern, + u8 *const custom) +{ + bool scramble = false; + u32 train_pattern = 0; + u32 link_pattern = 0; + u8 dpcd_train = 0; + u8 dpcd_link = 0; + int ret; + + switch (pattern) { + case TEST_TPS1: + train_pattern = 1; + break; + case TEST_TPS2: + train_pattern = 2; + break; + case TEST_TPS3: + train_pattern = 3; + break; + case TEST_SYMBOL_ERROR: + scramble = true; + link_pattern = DP_PHY_TEST_PATTERN_ERROR_COUNT; + break; + case TEST_PRBS7: + /* We use a dedicated register to enable PRBS7 */ + dpcd_link = DP_LINK_QUAL_PATTERN_ERROR_RATE; + break; + case TEST_80BIT_CUSTOM: { + const u8 *p = custom; + + link_pattern = DP_LINK_QUAL_PATTERN_80BIT_CUSTOM; + + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_1, + (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_2, + (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]); + zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_3, + (p[9] << 8) | p[8]); + break; + } + case TEST_CP2520: + link_pattern = DP_LINK_QUAL_PATTERN_CP2520_PAT_1; + break; + default: + WARN_ON_ONCE(1); + fallthrough; + case TEST_VIDEO: + scramble = true; + } + + zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, !scramble); + zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, train_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_LINK_QUAL_PATTERN_SET, link_pattern); + zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMIT_PRBS7, pattern == TEST_PRBS7); + + dpcd_link = dpcd_link ?: link_pattern; + dpcd_train = train_pattern; + if (!scramble) + dpcd_train |= DP_LINK_SCRAMBLING_DISABLE; + + if (dp->dpcd[DP_DPCD_REV] < 0x12) { + if (pattern == TEST_CP2520) + dev_warn(dp->dev, + "can't set sink link quality pattern to CP2520 for DPCD < r1.2; error counters will be invalid\n"); + else + dpcd_train |= FIELD_PREP(DP_LINK_QUAL_PATTERN_11_MASK, + dpcd_link); + } else { + u8 dpcd_link_lane[ZYNQMP_DP_MAX_LANES]; + + memset(dpcd_link_lane, dpcd_link, ZYNQMP_DP_MAX_LANES); + ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_QUAL_LANE0_SET, + dpcd_link_lane, ZYNQMP_DP_MAX_LANES); + if (ret < 0) + return ret; + } + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, dpcd_train); + return ret < 0 ? 
ret : 0; +} + +static int zynqmp_dp_test_setup(struct zynqmp_dp *dp) +{ + return zynqmp_dp_setup(dp, dp->test.bw_code, dp->test.link_cnt, + dp->test.enhanced, dp->test.downspread); +} + +static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = snprintf(buf, sizeof(buf), "%s\n", + test_pattern_str[dp->test.pattern]); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return simple_read_from_buffer(user_buf, count, ppos, buf, ret); +} + +static ssize_t zynqmp_dp_pattern_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + char buf[16]; + ssize_t ret; + int pattern; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, + count); + if (ret < 0) + goto out; + buf[ret] = '\0'; + + pattern = sysfs_match_string(test_pattern_str, buf); + if (pattern < 0) { + ret = -EINVAL; + goto out; + } + + mutex_lock(&dp->lock); + dp->test.pattern = pattern; + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_pattern = { + .read = zynqmp_dp_pattern_read, + .write = zynqmp_dp_pattern_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_enhanced_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.enhanced; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_enhanced_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.enhanced = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get, + zynqmp_dp_enhanced_set, "%llu\n"); + +static int zynqmp_dp_downspread_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.downspread; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_downspread_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + dp->test.downspread = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get, + zynqmp_dp_downspread_set, "%llu\n"); + +static int zynqmp_dp_active_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.active; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_active_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + mutex_lock(&dp->lock); + if (val) { + if (val < 2) { + ret = zynqmp_dp_test_setup(dp); + if (ret) + goto out; + } + + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom); + if (ret) + goto out; + + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + if (ret) + goto out; + + dp->test.active = true; + } else { + int err; + + dp->test.active = 
false; + err = zynqmp_dp_set_test_pattern(dp, TEST_VIDEO, NULL); + if (err) + dev_warn(dp->dev, "could not clear test pattern: %d\n", + err); + zynqmp_dp_train_loop(dp); + } +out: + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get, + zynqmp_dp_active_set, "%llu\n"); + +static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + mutex_lock(&dp->lock); + ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom, + sizeof(dp->test.custom)); + mutex_unlock(&dp->lock); + + debugfs_file_put(dentry); + return ret; +} + +static ssize_t zynqmp_dp_custom_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dentry *dentry = file->f_path.dentry; + struct zynqmp_dp *dp = file->private_data; + ssize_t ret; + char buf[sizeof(dp->test.custom)]; + + ret = debugfs_file_get(dentry); + if (unlikely(ret)) + return ret; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (ret < 0) + goto out; + + mutex_lock(&dp->lock); + memcpy(dp->test.custom, buf, ret); + if (dp->test.active) + ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern, + dp->test.custom) ?: ret; + mutex_unlock(&dp->lock); + +out: + debugfs_file_put(dentry); + return ret; +} + +static const struct file_operations fops_zynqmp_dp_custom = { + .read = zynqmp_dp_custom_read, + .write = zynqmp_dp_custom_write, + .open = simple_open, + .llseek = noop_llseek, +}; + +static int zynqmp_dp_swing_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_swing_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 3) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_SWING_REACHED | + DP_TRAIN_VOLTAGE_SWING_MASK); + *train_set |= val; + if (val == 3) + *train_set |= DP_TRAIN_MAX_SWING_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get, + zynqmp_dp_swing_set, "%llu\n"); + +static int zynqmp_dp_preemphasis_get(void *data, u64 *val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + + mutex_lock(&dp->lock); + *val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, + dp->test.train_set[priv->lane]); + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_preemphasis_set(void *data, u64 val) +{ + struct zynqmp_dp_train_set_priv *priv = data; + struct zynqmp_dp *dp = priv->dp; + u8 *train_set = &dp->test.train_set[priv->lane]; + int ret = 0; + + if (val > 2) + return -EINVAL; + + mutex_lock(&dp->lock); + *train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | + DP_TRAIN_PRE_EMPHASIS_MASK); + *train_set |= FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, val); + if (val == 2) + *train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + if (dp->test.active) + ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set); + mutex_unlock(&dp->lock); + + return ret; +} + 
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get, + zynqmp_dp_preemphasis_set, "%llu\n"); + +static int zynqmp_dp_lanes_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->test.link_cnt; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_lanes_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int ret = 0; + + if (val > ZYNQMP_DP_MAX_LANES) + return -EINVAL; + + mutex_lock(&dp->lock); + if (val > dp->num_lanes) { + ret = -EINVAL; + } else { + dp->test.link_cnt = val; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + } + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get, + zynqmp_dp_lanes_set, "%llu\n"); + +static int zynqmp_dp_rate_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_rate_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + int link_rate; + int ret = 0; + u8 bw_code; + + if (do_div(val, 10000)) + return -EINVAL; + + bw_code = drm_dp_link_rate_to_bw_code(val); + link_rate = drm_dp_bw_code_to_link_rate(bw_code); + if (val != link_rate) + return -EINVAL; + + if (bw_code != DP_LINK_BW_1_62 && bw_code != DP_LINK_BW_2_7 && + bw_code != DP_LINK_BW_5_4) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->test.bw_code = bw_code; + if (dp->test.active) + ret = zynqmp_dp_test_setup(dp); + mutex_unlock(&dp->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get, + zynqmp_dp_rate_set, "%llu\n"); + +static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->aux.hw_mutex); + *val = dp->ignore_aux_errors; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->aux.hw_mutex); + dp->ignore_aux_errors = val; + mutex_unlock(&dp->aux.hw_mutex); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_aux_errors, + zynqmp_dp_ignore_aux_errors_get, + zynqmp_dp_ignore_aux_errors_set, "%llu\n"); + +static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val) +{ + struct zynqmp_dp *dp = data; + + mutex_lock(&dp->lock); + *val = dp->ignore_hpd; + mutex_unlock(&dp->lock); + return 0; +} + +static int zynqmp_dp_ignore_hpd_set(void *data, u64 val) +{ + struct zynqmp_dp *dp = data; + + if (val != !!val) + return -EINVAL; + + mutex_lock(&dp->lock); + dp->ignore_hpd = val; + mutex_unlock(&dp->lock); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_hpd, zynqmp_dp_ignore_hpd_get, + zynqmp_dp_ignore_hpd_set, "%llu\n"); + +static void zynqmp_dp_bridge_debugfs_init(struct drm_bridge *bridge, + struct dentry *root) +{ + struct zynqmp_dp *dp = bridge_to_dp(bridge); + struct dentry *test; + int i; + + dp->test.bw_code = DP_LINK_BW_5_4; + dp->test.link_cnt = dp->num_lanes; + + test = debugfs_create_dir("test", root); +#define CREATE_FILE(name) \ + debugfs_create_file(#name, 0600, test, dp, &fops_zynqmp_dp_##name) + CREATE_FILE(pattern); + CREATE_FILE(enhanced); + CREATE_FILE(downspread); + CREATE_FILE(active); + CREATE_FILE(custom); + CREATE_FILE(rate); + CREATE_FILE(lanes); + CREATE_FILE(ignore_aux_errors); + CREATE_FILE(ignore_hpd); + + for (i = 0; i < dp->num_lanes; i++) { + static const char fmt[] = 
"lane%d_preemphasis"; + char name[sizeof(fmt)]; + + dp->debugfs_train_set[i].dp = dp; + dp->debugfs_train_set[i].lane = i; + + snprintf(name, sizeof(name), fmt, i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_preemphasis); + + snprintf(name, sizeof(name), "lane%d_swing", i); + debugfs_create_file(name, 0600, test, + &dp->debugfs_train_set[i], + &fops_zynqmp_dp_swing); + } +} + static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .attach = zynqmp_dp_bridge_attach, .detach = zynqmp_dp_bridge_detach, @@ -1618,6 +2334,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = { .detect = zynqmp_dp_bridge_detect, .edid_read = zynqmp_dp_bridge_edid_read, .atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts, + .debugfs_init = zynqmp_dp_bridge_debugfs_init, }; /* ----------------------------------------------------------------------------- @@ -1651,10 +2368,46 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work) struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work); enum drm_connector_status status; - status = zynqmp_dp_bridge_detect(&dp->bridge); + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + status = __zynqmp_dp_bridge_detect(dp); + mutex_unlock(&dp->lock); + drm_bridge_hpd_notify(&dp->bridge, status); } +static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work) +{ + struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, + hpd_irq_work); + u8 status[DP_LINK_STATUS_SIZE + 2]; + int err; + + mutex_lock(&dp->lock); + if (dp->ignore_hpd) { + mutex_unlock(&dp->lock); + return; + } + + err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, + DP_LINK_STATUS_SIZE + 2); + if (err < 0) { + dev_dbg_ratelimited(dp->dev, + "could not read sink status: %d\n", err); + } else { + if (status[4] & DP_LINK_STATUS_UPDATED || + !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || + !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { + zynqmp_dp_train_loop(dp); + } + } + mutex_unlock(&dp->lock); +} + static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) { struct zynqmp_dp *dp = (struct zynqmp_dp *)data; @@ -1686,23 +2439,15 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data) if (status & ZYNQMP_DP_INT_HPD_EVENT) schedule_work(&dp->hpd_work); - if (status & ZYNQMP_DP_INT_HPD_IRQ) { - int ret; - u8 status[DP_LINK_STATUS_SIZE + 2]; + if (status & ZYNQMP_DP_INT_HPD_IRQ) + schedule_work(&dp->hpd_irq_work); - ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status, - DP_LINK_STATUS_SIZE + 2); - if (ret < 0) - goto handled; + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY) + complete(&dp->aux_done); - if (status[4] & DP_LINK_STATUS_UPDATED || - !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) || - !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) { - zynqmp_dp_train_loop(dp); - } - } + if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT) + complete(&dp->aux_done); -handled: return IRQ_HANDLED; } @@ -1725,8 +2470,11 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) dp->dev = &pdev->dev; dp->dpsub = dpsub; dp->status = connector_status_disconnected; + mutex_init(&dp->lock); + init_completion(&dp->aux_done); INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func); + INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func); /* Acquire all resources (IOMEM, IRQ and PHYs). 
*/ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp"); @@ -1802,9 +2550,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) * Now that the hardware is initialized and won't generate spurious * interrupts, request the IRQ. */ - ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL, - zynqmp_dp_irq_handler, IRQF_ONESHOT, - dev_name(dp->dev), dp); + ret = devm_request_irq(dp->dev, dp->irq, zynqmp_dp_irq_handler, + IRQF_SHARED, dev_name(dp->dev), dp); if (ret < 0) goto err_phy_exit; @@ -1829,8 +2576,9 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) struct zynqmp_dp *dp = dpsub->dp; zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL); - disable_irq(dp->irq); + devm_free_irq(dp->dev, dp->irq, dp); + cancel_work_sync(&dp->hpd_irq_work); cancel_work_sync(&dp->hpd_work); zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0); @@ -1838,4 +2586,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub) zynqmp_dp_phy_exit(dp); zynqmp_dp_reset(dp, true); + mutex_destroy(&dp->lock); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index bd1368df7870..fc81983d9e5e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_blend.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> +#include <drm/drm_client_setup.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> @@ -402,6 +403,7 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = { DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create), + DRM_FBDEV_DMA_DRIVER_OPS, .fops = &zynqmp_dpsub_drm_fops, @@ -509,12 +511,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) if (ret) return ret; - drm_kms_helper_poll_init(drm); - ret = zynqmp_dpsub_kms_init(dpsub); if (ret < 0) goto err_poll_fini; + drm_kms_helper_poll_init(drm); + /* Reset all components and register the DRM device. */ drm_mode_config_reset(drm); @@ -523,7 +525,7 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) goto err_poll_fini; /* Initialize fbdev generic emulation. */ - drm_fbdev_dma_setup(drm, 24); + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888); return 0; @@ -536,7 +538,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub) { struct drm_device *drm = &dpsub->drm->dev; - drm_dev_unregister(drm); + drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); drm_encoder_cleanup(&dpsub->drm->encoder); drm_kms_helper_poll_fini(drm);
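The zynqmp_kms.c hunks above show the driver-side conversion pattern for the DRM client library introduced in this series: the driver declares its fbdev probe hook via DRM_FBDEV_DMA_DRIVER_OPS and calls drm_client_setup_with_fourcc() instead of drm_fbdev_dma_setup(). A minimal sketch with a hypothetical driver (my_driver and my_drm_init are illustrative, not part of the patch):

/* Sketch only: hypothetical DMA-helper-based driver using the client library. */
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	/* Wires the generic client library to the DMA-backed fbdev code. */
	DRM_FBDEV_DMA_DRIVER_OPS,
	/* ... .fops, .name, .desc, GEM ops, ... */
};

static int my_drm_init(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/*
	 * Replaces drm_fbdev_dma_setup(drm, 24): the preferred format is
	 * passed as a fourcc, and the client library starts whichever
	 * in-kernel clients (fbdev here) the kernel config enabled.
	 */
	drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
	return 0;
}

Passing DRM_FORMAT_RGB888 preserves the previous 24 bpp default while leaving the choice and lifetime of the default clients to the client library, which is why the error paths of the caller need no extra cleanup for it.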