Diffstat (limited to 'drivers/gpu/drm/vc4')
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c           22
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c         45
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c       4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c          41
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h          36
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c         111
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c          19
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c        216
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h         34
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi_phy.c      8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi_regs.h    15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c          26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c         550
-rw-r--r--  drivers/gpu/drm/vc4/vc4_perfmon.c       2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c          21
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c          16
16 files changed, 765 insertions, 401 deletions
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index f432278173cd..dc316cb79e00 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -385,14 +385,13 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .get_sg_table = drm_gem_cma_get_sg_table,
.vmap = vc4_prime_vmap,
- .vunmap = drm_gem_cma_prime_vunmap,
.vm_ops = &vc4_vm_ops,
};
/**
- * vc4_gem_create_object - Implementation of driver->gem_create_object.
+ * vc4_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
@@ -468,7 +467,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
- struct drm_printer p = drm_info_printer(vc4->dev->dev);
+ struct drm_printer p = drm_info_printer(vc4->base.dev);
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
@@ -609,7 +608,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev);
@@ -783,19 +782,19 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return -EINVAL;
}
- return drm_gem_cma_prime_mmap(obj, vma);
+ return drm_gem_prime_mmap(obj, vma);
}
-void *vc4_prime_vmap(struct drm_gem_object *obj)
+int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) {
DRM_DEBUG("mmaping of shader BOs not allowed.\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- return drm_gem_cma_prime_vmap(obj);
+ return drm_gem_cma_vmap(obj, map);
}
struct drm_gem_object *
@@ -1024,6 +1023,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1052,10 +1052,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
- return 0;
+ return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}
-void vc4_bo_cache_destroy(struct drm_device *dev)
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
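
A minimal sketch of the managed-release pattern the vc4_bo.c hunks above switch to (names are illustrative, not from the driver): instead of exporting vc4_bo_cache_destroy() and calling it from the unbind path, the init function registers the teardown as a DRM-managed action, and the DRM core runs it automatically when the device is released.

#include <drm/drm_device.h>
#include <drm/drm_managed.h>

static void example_cache_destroy(struct drm_device *dev, void *unused)
{
	/* Free cached buffers, cancel timers and workqueues, etc. */
}

static int example_cache_init(struct drm_device *dev)
{
	/* ... allocate and initialize the cache ... */

	/* Teardown now runs automatically at device release; if the
	 * registration itself fails, the _or_reset variant calls the
	 * callback immediately, so no error label is needed. */
	return drmm_add_action_or_reset(dev, example_cache_destroy, NULL);
}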
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f04f5cc8c839..269390bc586e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
-static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+static int vc4_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ unsigned int channel)
{
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
@@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
mdelay(20);
if (vc4_encoder && vc4_encoder->post_crtc_disable)
- vc4_encoder->post_crtc_disable(encoder);
+ vc4_encoder->post_crtc_disable(encoder, state);
vc4_crtc_pixelvalve_reset(crtc);
vc4_hvs_stop_channel(dev, channel);
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
- vc4_encoder->post_crtc_powerdown(encoder);
+ vc4_encoder->post_crtc_powerdown(encoder, state);
return 0;
}
@@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (channel < 0)
return 0;
- return vc4_crtc_disable(crtc, channel);
+ return vc4_crtc_disable(crtc, NULL, channel);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
+ vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
*/
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
if (vc4_encoder->pre_crtc_configure)
- vc4_encoder->pre_crtc_configure(encoder);
+ vc4_encoder->pre_crtc_configure(encoder, state);
vc4_crtc_config_pv(crtc);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
if (vc4_encoder->pre_crtc_enable)
- vc4_encoder->pre_crtc_enable(encoder);
+ vc4_encoder->pre_crtc_enable(encoder, state);
/* When feeding the transposer block the pixelvalve is unneeded and
* should not be enabled.
@@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
if (vc4_encoder->post_crtc_enable)
- vc4_encoder->post_crtc_enable(encoder);
+ vc4_encoder->post_crtc_enable(encoder, state);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -584,9 +584,11 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
}
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int ret, i;
@@ -595,7 +597,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state,
+ i) {
if (conn_state->crtc != crtc)
continue;
@@ -694,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
vc4_plane_async_set_fb(plane, flip_state->fb);
@@ -726,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
kfree(flip_state);
-
- up(&vc4->async_modeset);
}
/* Implements async (non-vblank-synced) page flips.
@@ -742,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
int ret = 0;
struct vc4_async_flip_state *flip_state;
@@ -771,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
flip_state->crtc = crtc;
flip_state->event = event;
- /* Make sure all other async modesetes have landed. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- drm_framebuffer_put(fb);
- vc4_bo_dec_usecnt(bo);
- kfree(flip_state);
- return ret;
- }
-
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
@@ -881,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
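
For context, a condensed sketch of the calling convention these vc4_crtc.c hunks adopt (hypothetical names): the CRTC atomic helpers now receive the whole drm_atomic_state, and per-object states are looked up from it on demand instead of being passed in directly.

static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	/* Replaces the old per-CRTC state argument. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	if (!crtc_state->enable)
		return 0;

	/* The connector iterator now works on the atomic state directly,
	 * rather than going through crtc_state->state. */
	for_each_new_connector_in_state(state, conn, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;
		/* ... per-connector checks ... */
	}

	return 0;
}

The same convention is what lets the encoder callbacks in this series grow a struct drm_atomic_state * parameter: with the full state in hand, an encoder can look up its own connector state at enable/disable time.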
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4fbbf980a299..6da22af4ee91 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,7 +16,7 @@ struct vc4_debugfs_info_entry {
struct drm_info_list info;
};
-/**
+/*
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
*/
@@ -46,7 +46,7 @@ static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
return 0;
}
-/**
+/*
* Registers a debugfs file with a callback function for a vc4 component.
*
* This is like drm_debugfs_create_files(), but that can only be
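
These two hunks only change the comment opener: '/**' is reserved for kernel-doc, and a comment only qualifies if it follows the kernel-doc layout, roughly like the illustrative template below. Free-form descriptions such as the two touched here should open with a plain '/*' so the documentation tooling does not warn about them.

/**
 * example_function - one-line summary of what the function does
 * @dev: description of the first parameter
 * @name: description of the second parameter
 *
 * Optional longer description, wrapped as ordinary prose.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int example_function(struct drm_device *dev, const char *name);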
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8f10f609e4f8..2cd97a39c286 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -245,37 +245,37 @@ static int vc4_drm_bind(struct device *dev)
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
- if (!vc4)
- return -ENOMEM;
-
/* If VC4 V3D is missing, don't advertise render nodes. */
node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
if (!node || !of_device_is_available(node))
vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node);
- drm = drm_dev_alloc(&vc4_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ if (IS_ERR(vc4))
+ return PTR_ERR(vc4);
+
+ drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- vc4->dev = drm;
- drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list);
mutex_init(&vc4->bin_bo_lock);
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_put;
+ return ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
- vc4_gem_init(drm);
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(dev, drm);
if (ret)
- goto gem_destroy;
+ return ret;
ret = vc4_plane_create_additional_planes(drm);
if (ret)
@@ -300,30 +300,17 @@ static int vc4_drm_bind(struct device *dev)
unbind_all:
component_unbind_all(dev, drm);
-gem_destroy:
- vc4_gem_destroy(drm);
- drm_mode_config_cleanup(drm);
- vc4_bo_cache_destroy(drm);
-dev_put:
- drm_dev_put(drm);
+
return ret;
}
static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- drm_mode_config_cleanup(drm);
-
- drm_atomic_private_obj_fini(&vc4->load_tracker);
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
-
- drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
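
A condensed sketch (hypothetical names) of the allocation scheme the bind path above moves to: devm_drm_dev_alloc() allocates the driver structure with its embedded drm_device already initialized and tied to the underlying device's lifetime, which is what allows the hand-rolled gem_destroy/dev_put error labels to disappear.

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct example_dev {
	struct drm_device base;		/* must embed the drm_device */
	/* driver-private members ... */
};

static struct drm_driver example_drm_driver;

static int example_bind(struct device *dev)
{
	struct example_dev *edev;
	struct drm_device *drm;
	int ret;

	edev = devm_drm_dev_alloc(dev, &example_drm_driver,
				  struct example_dev, base);
	if (IS_ERR(edev))
		return PTR_ERR(edev);
	drm = &edev->base;

	/* Managed counterpart of drm_mode_config_init(): no cleanup label. */
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	return drm_dev_register(drm, 0);
}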
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7003e7f14a48..051ad4e31e52 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,6 +14,7 @@
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -71,12 +72,11 @@ struct vc4_perfmon {
};
struct vc4_dev {
- struct drm_device *dev;
+ struct drm_device base;
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
- struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct vc4_txp *txp;
@@ -214,10 +214,9 @@ struct vc4_dev {
struct work_struct reset_work;
} hangcheck;
- struct semaphore async_modeset;
-
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj hvs_channels;
struct drm_private_obj load_tracker;
/* List of vc4_debugfs_info_entry for adding to debugfs once
@@ -234,7 +233,7 @@ struct vc4_dev {
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
- return (struct vc4_dev *)dev->dev_private;
+ return container_of(dev, struct vc4_dev, base);
}
struct vc4_bo {
@@ -442,12 +441,12 @@ struct vc4_encoder {
enum vc4_encoder_type type;
u32 clock_select;
- void (*pre_crtc_configure)(struct drm_encoder *encoder);
- void (*pre_crtc_enable)(struct drm_encoder *encoder);
- void (*post_crtc_enable)(struct drm_encoder *encoder);
+ void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
- void (*post_crtc_disable)(struct drm_encoder *encoder);
- void (*post_crtc_powerdown)(struct drm_encoder *encoder);
+ void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};
static inline struct vc4_encoder *
@@ -530,6 +529,9 @@ struct vc4_crtc_state {
unsigned int top;
unsigned int bottom;
} margins;
+
+ /* Transitional state below, only valid during atomic commits */
+ bool update_muxing;
};
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
@@ -806,9 +808,8 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *vc4_prime_vmap(struct drm_gem_object *obj);
+int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int vc4_bo_cache_init(struct drm_device *dev);
-void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -873,8 +874,7 @@ extern struct platform_driver vc4_dsi_driver;
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
-void vc4_gem_init(struct drm_device *dev);
-void vc4_gem_destroy(struct drm_device *dev);
+int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
@@ -913,10 +913,10 @@ void vc4_irq_reset(struct drm_device *dev);
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
-int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state);
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
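
The header change above replaces the dev_private pointer with an embedded drm_device, so converting between the two structures becomes plain pointer arithmetic in both directions (an illustrative helper, not part of the patch):

static void example_both_directions(struct drm_device *drm)
{
	/* drm_device -> driver structure: container_of(), no dev_private. */
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	/* driver structure -> drm_device: take the member's address, which
	 * is why every vc4->dev user in this series becomes &vc4->base. */
	struct drm_device *same = &vc4->base;

	WARN_ON(same != drm);
}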
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 19aab4e7e209..a55256ed0955 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -306,11 +306,11 @@
# define DSI0_PHY_AFEC0_RESET BIT(11)
# define DSI1_PHY_AFEC0_PD_BG BIT(11)
# define DSI0_PHY_AFEC0_PD BIT(10)
-# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10)
# define DSI0_PHY_AFEC0_PD_BG BIT(9)
# define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9)
# define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8)
-# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8)
# define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4)
# define DSI_PHY_AFEC0_PTATADJ_SHIFT 4
# define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0)
@@ -493,6 +493,18 @@
*/
#define DSI1_ID 0x8c
+struct vc4_dsi_variant {
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ unsigned int port;
+
+ bool broken_axi_workaround;
+
+ const char *debugfs_name;
+ const struct debugfs_reg32 *regs;
+ size_t nregs;
+
+};
+
/* General DSI hardware state. */
struct vc4_dsi {
struct platform_device *pdev;
@@ -509,8 +521,7 @@ struct vc4_dsi {
u32 *reg_dma_mem;
dma_addr_t reg_paddr;
- /* Whether we're on bcm2835's DSI0 or DSI1. */
- int port;
+ const struct vc4_dsi_variant *variant;
/* DSI channel for the panel we're connected to. */
u32 channel;
@@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
- DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
- DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
/* VC4 DSI encoder KMS struct */
struct vc4_dsi_encoder {
@@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
ret = pm_runtime_get_sync(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
/* Set AFE CTR00/CTR1 to release powerdown of analog. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
@@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
- (dsi->port == 0 ?
+ (dsi->variant->port == 0 ?
VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
@@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP1_ENABLE);
/* Ungate the block. */
- if (dsi->port == 0)
+ if (dsi->variant->port == 0)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
else
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
} else {
DSI_PORT_WRITE(PHY_AFEC0,
DSI_PORT_READ(PHY_AFEC0) &
@@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
+ .port = 1,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
+ .port = 0,
+ .debugfs_name = "dsi0_regs",
+ .regs = dsi0_regs,
+ .nregs = ARRAY_SIZE(dsi0_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+ .port = 1,
+ .broken_axi_workaround = true,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
static const struct of_device_id vc4_dsi_dt_match[] = {
- { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ { .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
+ { .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
+ { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
{}
};
@@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
*ret = IRQ_HANDLED;
}
@@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
struct device *dev = &dsi->pdev->dev;
const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
static const struct {
- const char *dsi0_name, *dsi1_name;
+ const char *name;
int div;
} phy_clocks[] = {
- { "dsi0_byte", "dsi1_byte", 8 },
- { "dsi0_ddr2", "dsi1_ddr2", 4 },
- { "dsi0_ddr", "dsi1_ddr", 2 },
+ { "byte", 8 },
+ { "ddr2", 4 },
+ { "ddr", 2 },
};
int i;
@@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
struct clk_init_data init;
+ char clk_name[16];
int ret;
+ snprintf(clk_name, sizeof(clk_name),
+ "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
/* We just use core fixed factor clock ops for the PHY
* clocks. The clocks are actually gated by the
* PHY_AFEC0_DDRCLK_EN bits, which we should be
@@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
memset(&init, 0, sizeof(init));
init.parent_names = &parent_name;
init.num_parents = 1;
- if (dsi->port == 1)
- init.name = phy_clocks[i].dsi1_name;
- else
- init.name = phy_clocks[i].dsi0_name;
+ init.name = clk_name;
init.ops = &clk_fixed_factor_ops;
ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
@@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
- dsi->port = (uintptr_t)match->data;
+ dsi->variant = match->data;
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
@@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(dsi->regs);
dsi->regset.base = dsi->regs;
- if (dsi->port == 0) {
- dsi->regset.regs = dsi0_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
- } else {
- dsi->regset.regs = dsi1_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
- }
+ dsi->regset.regs = dsi->variant->regs;
+ dsi->regset.nregs = dsi->variant->nregs;
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- /* DSI1 has a broken AXI slave that doesn't respond to writes
- * from the ARM. It does handle writes from the DMA engine,
+ /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to
+ * writes from the ARM. It does handle writes from the DMA engine,
* so set up a channel for talking to it.
*/
- if (dsi->port == 1) {
+ if (dsi->variant->broken_axi_workaround) {
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- if (dsi->port == 1)
- vc4->dsi1 = dsi;
-
drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
@@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
- if (dsi->port == 0)
- vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
- else
- vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
pm_runtime_enable(dev);
@@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
@@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
drm_encoder_cleanup(dsi->encoder);
-
- if (dsi->port == 1)
- vc4->dsi1 = NULL;
}
static const struct component_ops vc4_dsi_ops = {
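
A minimal sketch of the devicetree-variant pattern vc4_dsi.c adopts above, with hypothetical names; the real code looks the variant up via of_match_device(), while the sketch uses the equivalent of_device_get_match_data() shorthand.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant {
	unsigned int port;
	bool broken_axi_workaround;
};

static const struct example_variant example_dsi1_variant = {
	.port = 1,
	.broken_axi_workaround = true,
};

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-dsi1", .data = &example_dsi1_variant },
	{}
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -ENODEV;

	if (variant->broken_axi_workaround) {
		/* set up the DMA-based register write path */
	}

	return 0;
}

Describing per-compatible quirks in a data structure keeps SoC differences (here the new BCM2711 DSI1) a data-only addition instead of sprinkling port-number checks through the driver.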
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 9f01ddd5b932..b641252939d8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work)
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, hangcheck.reset_work);
- vc4_save_hang_state(vc4->dev);
+ vc4_save_hang_state(&vc4->base);
- vc4_reset(vc4->dev);
+ vc4_reset(&vc4->base);
}
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
list_del(&exec->head);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
@@ -1258,13 +1258,13 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail:
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
return ret;
}
-void
-vc4_gem_init(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused);
+int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1285,10 +1285,11 @@ vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->purgeable.list);
mutex_init(&vc4->purgeable.lock);
+
+ return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
-void
-vc4_gem_destroy(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
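
For reference, a small sketch of the back-reference pattern the vc4_gem.c hunks rely on (illustrative field names): with the drm_device embedded, timer and work callbacks recover the driver structure via from_timer()/container_of() and then reach the drm_device by taking the member's address rather than following a stored pointer.

struct example_dev {
	struct drm_device base;
	struct timer_list hangcheck_timer;
};

static void example_hangcheck_elapsed(struct timer_list *t)
{
	/* from_timer() is container_of() specialized for timer_list. */
	struct example_dev *edev = from_timer(edev, t, hangcheck_timer);
	struct drm_device *dev = &edev->base;	/* previously edev->dev */

	drm_dbg(dev, "hangcheck fired\n");
}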
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 95779d50cca0..2e5449b25ce4 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -76,12 +76,25 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
+
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0)
+
+#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31)
+
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
#define VC4_HSM_MID_CLOCK 149985000
+#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -170,16 +183,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
- drm_atomic_helper_connector_reset(connector);
+ struct vc4_hdmi_connector_state *old_state =
+ conn_state_to_vc4_hdmi_conn_state(connector->state);
+ struct vc4_hdmi_connector_state *new_state =
+ kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+ if (!new_state)
+ return;
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(old_state);
+ __drm_atomic_helper_connector_reset(connector, &new_state->base);
+
+ new_state->base.max_bpc = 8;
+ new_state->base.max_requested_bpc = 8;
drm_atomic_helper_connector_tv_reset(connector);
}
+static struct drm_connector_state *
+vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state = connector->state;
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+ struct vc4_hdmi_connector_state *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ new_state->pixel_rate = vc4_state->pixel_rate;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ return &new_state->base;
+}
+
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -200,12 +245,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
vc4_hdmi->ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
return ret;
drm_connector_attach_tv_margin_properties(connector);
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
@@ -219,7 +272,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
- enum hdmi_infoframe_type type)
+ enum hdmi_infoframe_type type,
+ bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
@@ -227,6 +281,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ if (!poll)
+ return 0;
+
return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
@@ -253,7 +310,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
if (len < 0)
return;
- ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+ ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
return;
@@ -331,9 +388,8 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
union hdmi_infoframe frame;
- int ret;
- ret = hdmi_audio_infoframe_init(&frame.audio);
+ hdmi_audio_infoframe_init(&frame.audio);
frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
@@ -357,7 +413,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_audio_infoframe(encoder);
}
-static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -370,7 +427,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
}
-static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
int ret;
@@ -469,6 +527,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -512,7 +571,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
}
+
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -532,6 +593,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned char gcp;
+ bool gcp_en;
+ u32 reg;
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
@@ -557,6 +621,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+ switch (state->max_bpc) {
+ case 12:
+ gcp = 6;
+ gcp_en = true;
+ break;
+ case 10:
+ gcp = 5;
+ gcp_en = true;
+ break;
+ case 8:
+ default:
+ gcp = 4;
+ gcp_en = false;
+ break;
+ }
+
+ reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
+ reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
+ VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
+ reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
+ VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
+ HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_WORD_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
+ reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ HDMI_WRITE(HDMI_GCP_WORD_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_CONFIG);
+ reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
+ reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
}
@@ -584,8 +681,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
-static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+static struct drm_connector_state *
+vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return conn_state;
+ }
+
+ return NULL;
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state =
+ vc4_hdmi_encoder_get_connector_state(encoder, state);
+ struct vc4_hdmi_connector_state *vc4_conn_state =
+ conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long pixel_rate, hsm_rate;
@@ -597,7 +715,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+ pixel_rate = vc4_conn_state->pixel_rate;
ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
@@ -665,7 +783,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
vc4_hdmi->variant->reset(vc4_hdmi);
if (vc4_hdmi->variant->phy_init)
- vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+ vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
@@ -673,10 +791,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
if (vc4_hdmi->variant->set_timings)
- vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+ vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
}
-static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
@@ -698,7 +817,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
}
-static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -760,12 +880,68 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
{
}
+#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long long pixel_rate = mode->clock * 1000;
+ unsigned long long tmds_rate;
+
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return -EINVAL;
+
+ /*
+ * The 1440p@60 pixel rate is in the same range as the first
+ * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+ * bandwidth). Slightly lower the frequency to bring it out of
+ * the WiFi range.
+ */
+ tmds_rate = pixel_rate * 10;
+ if (vc4_hdmi->disable_wifi_frequencies &&
+ (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+ tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+ mode->clock = 238560;
+ pixel_rate = mode->clock * 1000;
+ }
+
+ if (conn_state->max_bpc == 12) {
+ pixel_rate = pixel_rate * 150;
+ do_div(pixel_rate, 100);
+ } else if (conn_state->max_bpc == 10) {
+ pixel_rate = pixel_rate * 125;
+ do_div(pixel_rate, 100);
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ pixel_rate = pixel_rate * 2;
+
+ if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+ return -EINVAL;
+
+ vc4_state->pixel_rate = pixel_rate;
+
+ return 0;
+}
+
static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return MODE_H_ILLEGAL;
+
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
@@ -773,6 +949,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+ .atomic_check = vc4_hdmi_encoder_atomic_check,
.mode_valid = vc4_hdmi_encoder_mode_valid,
.disable = vc4_hdmi_encoder_disable,
.enable = vc4_hdmi_encoder_enable,
@@ -894,7 +1071,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
int ret;
vc4_hdmi->audio.streaming = false;
- ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
+ ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
@@ -1694,6 +1871,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
+ vc4_hdmi->disable_wifi_frequencies =
+ of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1808,7 +1988,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -1817,6 +1997,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
PHY_LANE_2,
PHY_LANE_CK,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
@@ -1833,7 +2014,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
@@ -1842,6 +2023,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
PHY_LANE_CK,
PHY_LANE_2,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
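
A worked example of the pixel-rate bookkeeping the new vc4_hdmi_encoder_atomic_check() stores in the connector state (numbers for illustration only): 1080p@60 has a 148.5 MHz pixel clock; at 12 bpc it is scaled by 1.5 to 222.75 MHz, still below the 340 MHz HDMI 1.4 TMDS limit the BCM2711 variants now advertise, while a 594 MHz 4k@60 clock already exceeds that limit at 8 bpc and is rejected.

static unsigned long long example_scaled_rate(unsigned long long rate_hz,
					      unsigned int max_bpc,
					      bool dblclk)
{
	if (max_bpc == 12) {
		rate_hz *= 150;
		do_div(rate_hz, 100);		/* x1.5 */
	} else if (max_bpc == 10) {
		rate_hz *= 125;
		do_div(rate_hz, 100);		/* x1.25 */
	}

	if (dblclk)
		rate_hz *= 2;

	return rate_hz;	/* e.g. 148500000 at 12 bpc -> 222750000 */
}

The WiFi coexistence check works on the TMDS bit rate, i.e. ten times the pixel clock: a roughly 241.5 MHz 1440p@60 clock lands around 2.415 GHz, inside the 2.4-2.422 GHz band of WiFi channel 1, so the clock is nudged down to 238.56 MHz (2.3856 GHz) to move it out of the band.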
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 63c6f8bddf1d..4c8994cfd932 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_hdmi_encoder, base.base);
}
-struct drm_display_mode;
-
struct vc4_hdmi;
struct vc4_hdmi_register;
+struct vc4_hdmi_connector_state;
enum vc4_hdmi_phy_channel {
PHY_LANE_0 = 0,
@@ -62,6 +61,9 @@ struct vc4_hdmi_variant {
*/
enum vc4_hdmi_phy_channel phy_lane_mapping[4];
+ /* The BCM2711 cannot deal with odd horizontal pixel timings */
+ bool unsupported_odd_h_timings;
+
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
@@ -75,11 +77,12 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode);
- /* Callback to initialize the PHY according to the mode */
+ /* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
/* Callback to disable the PHY */
void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
@@ -139,6 +142,14 @@ struct vc4_hdmi {
int hpd_gpio;
bool hpd_active_low;
+ /*
+ * On some systems (like the RPi4), some modes are in the same
+ * frequency range as the WiFi channels (1440p@60Hz, for
+ * example). Should we take evasive action because the system
+ * has a WiFi adapter?
+ */
+ bool disable_wifi_frequencies;
+
struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
@@ -169,14 +180,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder)
return container_of(_encoder, struct vc4_hdmi, encoder);
}
+struct vc4_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned long long pixel_rate;
+};
+
+static inline struct vc4_hdmi_connector_state *
+conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state)
+{
+ return container_of(conn_state, struct vc4_hdmi_connector_state, base);
+}
+
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
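
Together with the vc4_hdmi.c hunks, the header introduces the usual pattern for carrying driver data in a DRM connector state; a condensed illustration with hypothetical names:

struct example_connector_state {
	struct drm_connector_state base;
	unsigned long long pixel_rate;	/* filled in during atomic_check */
};

static struct drm_connector_state *
example_duplicate_state(struct drm_connector *connector)
{
	struct example_connector_state *old =
		container_of(connector->state, struct example_connector_state, base);
	struct example_connector_state *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->pixel_rate = old->pixel_rate;	/* carry the private field over */
	__drm_atomic_helper_connector_duplicate_state(connector, &new->base);

	return &new->base;
}

The generic drm_atomic_helper_connector_destroy_state() can stay in place because base is the first member, so freeing the base pointer frees the whole wrapper; only reset and duplicate need driver overrides to allocate and copy the extra field.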
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 057796b54c51..36535480f8e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -127,7 +127,8 @@
#define OSCILLATOR_FREQUENCY 54000000
-void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
-void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- unsigned long long pixel_freq = mode->clock * 1000;
+ unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
u8 vco_sel, vco_div;
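
The PHY-side consequence of the signature change above, sketched with a hypothetical helper: the PHY no longer derives its frequency from the display mode but consumes the rate that atomic_check already adjusted for colour depth and pixel repetition.

static unsigned long long
example_phy_rate(const struct vc4_hdmi_connector_state *conn_state)
{
	/* conn_state->pixel_rate already folds in the 1.25x/1.5x deep-colour
	 * and DBLCLK scaling, so recomputing from mode->clock would give the
	 * wrong answer; the PHY just derives its bit rate from it. */
	return conn_state->pixel_rate * 10;	/* ten TMDS bits per character */
}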
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 7c6b4818f245..401863cb8c98 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -59,9 +59,12 @@ enum vc4_hdmi_field {
*/
HDMI_CTS_0,
HDMI_CTS_1,
+ HDMI_DEEP_COLOR_CONFIG_1,
HDMI_DVP_CTL,
HDMI_FIFO_CTL,
HDMI_FRAME_COUNT,
+ HDMI_GCP_CONFIG,
+ HDMI_GCP_WORD_1,
HDMI_HORZA,
HDMI_HORZB,
HDMI_HOTPLUG,
@@ -142,7 +145,7 @@ struct vc4_hdmi_register {
#define VC5_RAM_REG(reg, offset) _VC4_REG(VC5_RAM, reg, offset)
#define VC5_RM_REG(reg, offset) _VC4_REG(VC5_RM, reg, offset)
-static const struct vc4_hdmi_register vc4_hdmi_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
VC4_HD_REG(HDMI_M_CTL, 0x000c),
VC4_HD_REG(HDMI_MAI_CTL, 0x0014),
VC4_HD_REG(HDMI_MAI_THR, 0x0018),
@@ -203,7 +206,7 @@ static const struct vc4_hdmi_register vc4_hdmi_fields[] = {
VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
};
-static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
VC4_HD_REG(HDMI_MAI_THR, 0x0014),
@@ -229,6 +232,9 @@ static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
@@ -279,7 +285,7 @@ static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
};
-static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
VC4_HD_REG(HDMI_MAI_THR, 0x0034),
@@ -305,6 +311,9 @@ static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
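
An aside on the __maybe_unused annotations added above: these register tables are defined static const in a header shared by several compilation units, and each unit only references some of them, so the attribute silences the resulting unused-const-variable warnings. Illustrative reduced form:

/* Included by several files; not every includer references this table. */
static const struct vc4_hdmi_register __maybe_unused example_fields[] = {
	VC4_HD_REG(HDMI_M_CTL, 0x000c),
};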
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4d0a833366ce..2b3a597fa65f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
SCALER_DISPSTATX_EMPTY);
}
-int vc4_hvs_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
@@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc,
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
*/
- if (hweight32(state->connector_mask) > 1)
+ if (hweight32(crtc_state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
dlist_count += vc4_plane_dlist_size(plane_state);
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
}
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
@@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
unsigned int chan = vc4_state->assigned_channel;
@@ -414,8 +416,10 @@ void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
@@ -560,7 +564,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
@@ -679,7 +683,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
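
One detail of the vc4_hvs.c conversion worth spelling out (hypothetical helper below): with the full atomic state passed in, each hook picks the side of the transition it needs, i.e. the enable path reads the new CRTC state while the disable path reads the old one, where the channel being torn down is still recorded.

static void example_hvs_atomic_disable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	/* The new state may already have its channel unassigned, so the
	 * old state tells us which FIFO to stop. */
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	unsigned int chan = to_vc4_crtc_state(old_state)->assigned_channel;

	vc4_hvs_stop_channel(crtc->dev, chan);
}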
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 149825ff5df8..f09254c2497d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -24,6 +24,8 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
+#define HVS_NUM_CHANNELS 3
+
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
@@ -35,6 +37,21 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_hvs_state {
+ struct drm_private_state base;
+
+ struct {
+ unsigned in_use: 1;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
+};
+
+static struct vc4_hvs_state *
+to_vc4_hvs_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_hvs_state, base);
+}
+
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
@@ -51,7 +68,7 @@ static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
struct drm_device *dev = state->dev;
- struct vc4_dev *vc4 = dev->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_private_state *priv_state;
int ret;
@@ -93,6 +110,29 @@ static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
.atomic_destroy_state = vc4_ctm_destroy_state,
};
+static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+}
+
+static int vc4_ctm_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_ctm_state *ctm_state;
+
+ drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+ ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+ if (!ctm_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
+ &vc4_ctm_state_funcs);
+
+ return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
+}
+
/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
@@ -146,6 +186,45 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
+static struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_atomic_state *state)
{
@@ -190,10 +269,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned char dsp2_mux = 0;
- unsigned char dsp3_mux = 3;
- unsigned char dsp4_mux = 3;
- unsigned char dsp5_mux = 3;
+ unsigned char mux;
unsigned int i;
u32 reg;
@@ -201,60 +277,70 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- if (!crtc_state->active)
+ if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
- dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ HVS_WRITE(SCALER_DISPECTRL,
+ (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
break;
case 3:
- dsp3_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ HVS_WRITE(SCALER_DISPCTRL,
+ (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
break;
case 4:
- dsp4_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ HVS_WRITE(SCALER_DISPEOLN,
+ (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
+
break;
case 5:
- dsp5_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ HVS_WRITE(SCALER_DISPDITHER,
+ (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
break;
default:
break;
}
}
-
- reg = HVS_READ(SCALER_DISPECTRL);
- HVS_WRITE(SCALER_DISPECTRL,
- (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
- VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
-
- reg = HVS_READ(SCALER_DISPCTRL);
- HVS_WRITE(SCALER_DISPCTRL,
- (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
- VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
-
- reg = HVS_READ(SCALER_DISPEOLN);
- HVS_WRITE(SCALER_DISPEOLN,
- (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
- VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
-
- reg = HVS_READ(SCALER_DISPDITHER);
- HVS_WRITE(SCALER_DISPDITHER,
- (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
- VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}
-static void
-vc4_atomic_complete_commit(struct drm_atomic_state *state)
+static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
+ struct vc4_hvs_state *old_hvs_state;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -270,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
+ if (IS_ERR(old_hvs_state))
+ return;
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
+ struct drm_crtc_commit *commit;
+ unsigned int channel = vc4_crtc_state->assigned_channel;
+ unsigned long done;
+
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
+
+ if (!old_hvs_state->fifo_state[channel].in_use)
+ continue;
+
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
+ continue;
- drm_atomic_helper_wait_for_dependencies(state);
+ done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for hw_done\n");
+
+ done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for flip_done\n");
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -295,125 +407,37 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_helper_commit_cleanup_done(state);
-
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 0);
-
- drm_atomic_state_put(state);
-
- up(&vc4->async_modeset);
}
-static void commit_work(struct work_struct *work)
-{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
- vc4_atomic_complete_commit(state);
-}
-
-/**
- * vc4_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int vc4_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
-
- if (state->async_update) {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
-
- drm_atomic_helper_async_commit(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- up(&vc4->async_modeset);
-
- return 0;
- }
+ struct drm_crtc_state *crtc_state;
+ struct vc4_hvs_state *hvs_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
- /* We know for sure we don't want an async update here. Set
- * state->legacy_cursor_update to false to prevent
- * drm_atomic_helper_setup_commit() from auto-completing
- * commit->flip_done.
- */
- state->legacy_cursor_update = false;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ if (IS_ERR(hvs_state))
+ return PTR_ERR(hvs_state);
- INIT_WORK(&state->commit_work, commit_work);
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(crtc_state);
+ unsigned int channel =
+ vc4_crtc_state->assigned_channel;
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
+ if (!hvs_state->fifo_state[channel].in_use)
+ continue;
- if (!nonblock) {
- ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- up(&vc4->async_modeset);
- return ret;
- }
+ hvs_state->fifo_state[channel].pending_commit =
+ drm_crtc_commit_get(crtc_state->commit);
}
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
- vc4_atomic_complete_commit(state);
-
return 0;
}
@@ -609,50 +633,172 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
-#define NUM_OUTPUTS 6
-#define NUM_CHANNELS 3
+static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (!vc4->load_tracker_available)
+ return;
+
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
+}
+
+static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ if (!vc4->load_tracker_available)
+ return 0;
+
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
+ &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
+ return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+}
+
+static struct drm_private_state *
+vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
+{
+ struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
+ struct vc4_hvs_state *state;
+ unsigned int i;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+
+ if (!old_state->fifo_state[i].pending_commit)
+ continue;
+
+ state->fifo_state[i].pending_commit =
+ drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
+ }
+
+ return &state->base;
+}
+
+static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+ unsigned int i;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].pending_commit)
+ continue;
+
+ drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
+ }
+
+ kfree(hvs_state);
+}
+
+static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+ .atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
+ .atomic_destroy_state = vc4_hvs_channels_destroy_state,
+};
+
+static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
-static int
-vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+ drm_atomic_private_obj_fini(&vc4->hvs_channels);
+}
+
+static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_hvs_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
+ &state->base,
+ &vc4_hvs_state_funcs);
+
+ return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
+}
+
+/*
+ * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
+ * the TXP (and therefore all the CRTCs found on that platform).
+ *
+ * The naive (and our initial) implementation would just iterate over
+ * all the active CRTCs, try to find a suitable FIFO, and then remove it
+ * from the pool of available FIFOs. However, there are a few corner
+ * cases that need to be considered:
+ *
+ * - When running in a dual-display setup (so with two CRTCs involved),
+ * we can update the state of a single CRTC (for example by changing
+ * its mode using xrandr under X11) without affecting the other. In
+ * this case, the other CRTC wouldn't be in the state at all, so we
+ * need to consider all the running CRTCs in the DRM device to assign
+ * a FIFO, not just the one in the state.
+ *
+ * - To fix the above, we can't use drm_atomic_get_crtc_state on all
+ * enabled CRTCs to pull their CRTC state into the global state, since
+ * the commit would then also wait for their vblank to complete. Since
+ * we have no guarantee that those CRTCs are actually active, that
+ * vblank might never happen, and it shouldn't be waited on at all if
+ * we only want to page flip a single CRTC. That can be tested by
+ * doing a modetest -v first on HDMI1 and then on HDMI0.
+ *
+ * - Since the pixelvalve needs to be disabled and re-enabled whenever
+ * its FIFO assignment changes, we should keep a FIFO assigned for as
+ * long as its CRTC is enabled, and only consider it free again once
+ * that CRTC has been disabled. This can be tested by booting X11 on a
+ * single display, and changing the resolution down and then back up.
+ */
+static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+ struct vc4_hvs_state *hvs_new_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i, ret;
-
- /*
- * Since the HVS FIFOs are shared across all the pixelvalves and
- * the TXP (and thus all the CRTCs), we need to pull the current
- * state of all the enabled CRTCs so that an update to a single
- * CRTC still keeps the previous FIFOs enabled and assigned to
- * the same CRTCs, instead of evaluating only the CRTC being
- * modified.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_state *crtc_state;
+ unsigned int unassigned_channels = 0;
+ unsigned int i;
- if (!crtc->state->enable)
- continue;
+ hvs_new_state = vc4_hvs_get_global_state(state);
+ if (IS_ERR(hvs_new_state))
+ return PTR_ERR(hvs_new_state);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
+ if (!hvs_new_state->fifo_state[i].in_use)
+ unassigned_channels |= BIT(i);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct vc4_crtc_state *old_vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
struct vc4_crtc_state *new_vc4_crtc_state =
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
+ unsigned int channel;
- if (old_crtc_state->enable && !new_crtc_state->enable)
- new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
-
- if (!new_crtc_state->enable)
+ /* Nothing to do here, let's skip it */
+ if (old_crtc_state->enable == new_crtc_state->enable)
continue;
- if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
- unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+ /* Muxing will need to be modified, mark it as such */
+ new_vc4_crtc_state->update_muxing = true;
+
+ /* If we're disabling our CRTC, we put back our channel */
+ if (!new_crtc_state->enable) {
+ channel = old_vc4_crtc_state->assigned_channel;
+ hvs_new_state->fifo_state[channel].in_use = false;
+ new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -681,16 +827,27 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
* but it works so far.
*/
matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (matching_channels) {
- unsigned int channel = ffs(matching_channels) - 1;
-
- new_vc4_crtc_state->assigned_channel = channel;
- unassigned_channels &= ~BIT(channel);
- } else {
+ if (!matching_channels)
return -EINVAL;
- }
+
+ channel = ffs(matching_channels) - 1;
+ new_vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ hvs_new_state->fifo_state[channel].in_use = true;
}
+ return 0;
+}
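A minimal standalone simulation of the FIFO-assignment policy described in the comment above (all structures and helper names here are hypothetical; the real driver does this bookkeeping through the atomic state objects): a CRTC keeps its channel for as long as it stays enabled, returns it to the pool when it is disabled, and a newly enabled CRTC takes the lowest free channel its pixelvalve can reach.

	#include <stdbool.h>
	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define NUM_CHANNELS		6
	#define CHANNEL_DISABLED	(-1)

	struct fake_crtc {
		unsigned int usable_channels;	/* bitmask of reachable FIFOs */
		bool enabled;
		int channel;
	};

	/* Assign or release a channel only when a CRTC flips between enabled
	 * and disabled; CRTCs whose state doesn't change keep their channel.
	 */
	static int update_channel(unsigned int *free_mask, struct fake_crtc *crtc,
				  bool enable)
	{
		unsigned int candidates;

		if (crtc->enabled == enable)
			return 0;			/* nothing to do */

		if (!enable) {
			*free_mask |= 1U << crtc->channel;	/* give it back */
			crtc->channel = CHANNEL_DISABLED;
			crtc->enabled = false;
			return 0;
		}

		candidates = *free_mask & crtc->usable_channels;
		if (!candidates)
			return -1;			/* no FIFO left */

		crtc->channel = ffs(candidates) - 1;
		*free_mask &= ~(1U << crtc->channel);
		crtc->enabled = true;
		return 0;
	}

	int main(void)
	{
		unsigned int free_mask = (1U << NUM_CHANNELS) - 1;
		struct fake_crtc hdmi0 = { .usable_channels = 0x3, .channel = CHANNEL_DISABLED };
		struct fake_crtc hdmi1 = { .usable_channels = 0x7, .channel = CHANNEL_DISABLED };

		update_channel(&free_mask, &hdmi0, true);
		update_channel(&free_mask, &hdmi1, true);
		printf("hdmi0 -> %d, hdmi1 -> %d\n", hdmi0.channel, hdmi1.channel);

		/* Disabling hdmi0 frees its FIFO again. */
		update_channel(&free_mask, &hdmi0, false);
		printf("free mask after disable: 0x%x\n", free_mask);
		return 0;
	}

Running it assigns channel 0 to hdmi0 and channel 1 to hdmi1, then shows channel 0 going back into the free mask once hdmi0 is disabled, which is the behaviour the check above preserves across commits.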
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = vc4_pv_muxing_atomic_check(dev, state);
+ if (ret)
+ return ret;
+
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
return ret;
@@ -702,17 +859,20 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return vc4_load_tracker_atomic_check(state);
}
+static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
+ .atomic_commit_setup = vc4_atomic_commit_setup,
+ .atomic_commit_tail = vc4_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.atomic_check = vc4_atomic_check,
- .atomic_commit = vc4_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
.fb_create = vc4_fb_create,
};
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_ctm_state *ctm_state;
- struct vc4_load_tracker_state *load_state;
bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
"brcm,bcm2711-vc5");
int ret;
@@ -726,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev)
vc4->load_tracker_enabled = true;
}
- sema_init(&vc4->async_modeset, 1);
-
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
@@ -747,30 +905,22 @@ int vc4_kms_load(struct drm_device *dev)
}
dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
- drm_modeset_lock_init(&vc4->ctm_state_lock);
-
- ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
- if (!ctm_state)
- return -ENOMEM;
-
- drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
- &vc4_ctm_state_funcs);
+ ret = vc4_ctm_obj_init(vc4);
+ if (ret)
+ return ret;
- if (vc4->load_tracker_available) {
- load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
- if (!load_state) {
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
- return -ENOMEM;
- }
+ ret = vc4_load_tracker_obj_init(vc4);
+ if (ret)
+ return ret;
- drm_atomic_private_obj_init(dev, &vc4->load_tracker,
- &load_state->base,
- &vc4_load_tracker_state_funcs);
- }
+ ret = vc4_hvs_channels_obj_init(vc4);
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index f4aa75efd16b..18abc06335c1 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -77,7 +77,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
mutex_init(&vc4file->perfmon.lock);
- idr_init(&vc4file->perfmon.idr);
+ idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
}
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index e0e0b72ea65c..c0122d83b651 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -273,8 +273,10 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
}
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
- struct drm_connector_state *conn_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
+ conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
struct drm_gem_cma_object *gem;
struct drm_display_mode *mode;
@@ -380,22 +382,23 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
- state->no_vblank = true;
+ crtc_state->no_vblank = true;
vc4_state->feed_txp = true;
return 0;
@@ -404,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_hvs_atomic_disable(crtc, old_state);
+ vc4_hvs_atomic_disable(crtc, state);
/*
* Make sure we issue a vblank event after disabling the CRTC if
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index f7ab979721b3..73d63d72575b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -122,7 +122,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
return 0;
}
-/**
+/*
* Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
* get the pm_runtime refcount to 0 in vc4_reset().
*/
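The comment describes a counted wrapper around runtime PM. A simplified standalone sketch of that idea (hypothetical names, locking and error handling omitted, pm_runtime_get_sync()/pm_runtime_put() replaced by stand-in counters): only the first driver-side user takes the single underlying reference, so a reset path knows that dropping exactly one reference lets the core usage count reach zero before retaking it.

	#include <stdio.h>

	/* Stand-in for the runtime-PM core usage count. */
	static int rpm_usage;
	static void rpm_get(void) { rpm_usage++; }
	static void rpm_put(void) { rpm_usage--; }

	/* Driver-private refcount: only the first user takes the single
	 * underlying runtime-PM reference.
	 */
	static int power_refcount;

	static void power_get(void)
	{
		if (power_refcount++ == 0)
			rpm_get();
	}

	static void power_put(void)
	{
		if (--power_refcount == 0)
			rpm_put();
	}

	static void reset(void)
	{
		/* Bounce the block: drop the single reference, then retake it. */
		rpm_put();
		printf("during reset, core usage = %d\n", rpm_usage);
		rpm_get();
	}

	int main(void)
	{
		power_get();
		power_get();	/* second user doesn't touch the core count */
		reset();	/* prints 0: the block could power-cycle here */
		power_put();
		power_put();
		printf("final core usage = %d\n", rpm_usage);
		return 0;
	}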
@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
unsigned long irqflags;
int slot;
uint64_t seqno = 0;
@@ -205,7 +205,7 @@ try_again:
return -ENOMEM;
}
-/**
+/*
* bin_bo_alloc() - allocates the memory that will be used for
* tile binning.
*
@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
INIT_LIST_HEAD(&list);
while (true) {
- struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
+ struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) {
@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
struct vc4_v3d *v3d = dev_get_drvdata(dev);
struct vc4_dev *vc4 = v3d->vc4;
- vc4_irq_uninstall(vc4->dev);
+ vc4_irq_uninstall(&vc4->base);
clk_disable_unprepare(v3d->clk);
@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev)
if (ret != 0)
return ret;
- vc4_v3d_init_hw(vc4->dev);
+ vc4_v3d_init_hw(&vc4->base);
/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->dev->irq);
- vc4_irq_postinstall(vc4->dev);
+ enable_irq(vc4->base.irq);
+ vc4_irq_postinstall(&vc4->base);
return 0;
}