From 2313b1897f574e036635b25c501ec5e008407241 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 7 Dec 2018 16:16:51 +0800 Subject: drm/i915/gvt: mandatory require hypervisor's host_init Don't mark hypervisor module's host_init as optional, but mandatory required. Reviewed-by: Yuan, Hang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mpt.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..ce721099a020 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -50,11 +50,10 @@ * Zero on success, negative error code if failed */ static inline int intel_gvt_hypervisor_host_init(struct device *dev, - void *gvt, const void *ops) + void *gvt, const void *ops) { - /* optional to provide */ if (!intel_gvt_host.mpt->host_init) - return 0; + return -ENODEV; return intel_gvt_host.mpt->host_init(dev, gvt, ops); } -- cgit v1.2.3 From a2b8419a9e2975d19c0cd85f4912f2873bd974e0 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 7 Dec 2018 16:16:52 +0800 Subject: drm/i915/gvt: remove unused parameter for hypervisor's host_exit call The parameter 'void *gvt' is not used and required for hypervisor's exit call. Even for non-merged Xen hypervisor support. So just remove it. Reviewed-by: Yuan, Hang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 2 +- drivers/gpu/drm/i915/gvt/hypercall.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- drivers/gpu/drm/i915/gvt/mpt.h | 5 ++--- 4 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 733a2a0d0c30..a5b760b7bc10 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -316,7 +316,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) return; intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); + intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev); intel_gvt_cleanup_vgpu_type_groups(gvt); intel_gvt_clean_vgpu_types(gvt); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e49a9247ed78 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -39,7 +39,7 @@ */ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt, const void *ops); - void (*host_exit)(struct device *dev, void *gvt); + void (*host_exit)(struct device *dev); int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(unsigned long handle); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..1bbd04d30c42 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1467,7 +1467,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) return mdev_register_device(dev, &intel_vgpu_ops); } -static void kvmgt_host_exit(struct device *dev, void *gvt) +static void kvmgt_host_exit(struct device *dev) { mdev_unregister_device(dev); } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index ce721099a020..c95ef77da62c 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -61,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev, /** * intel_gvt_hypervisor_host_exit - exit GVT-g 
host side */ -static inline void intel_gvt_hypervisor_host_exit(struct device *dev, - void *gvt) +static inline void intel_gvt_hypervisor_host_exit(struct device *dev) { /* optional to provide */ if (!intel_gvt_host.mpt->host_exit) return; - intel_gvt_host.mpt->host_exit(dev, gvt); + intel_gvt_host.mpt->host_exit(dev); } /** -- cgit v1.2.3 From 9bdb073464d6008ed1839d358e320108ed12daae Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 7 Dec 2018 16:16:53 +0800 Subject: drm/i915/gvt: Change KVMGT as self load module This trys to make 'kvmgt' module as self loadable instead of loading by i915/gvt device model. So hypervisor specific module could be stand-alone, e.g only after loading hypervisor specific module, GVT feature could be enabled via specific hypervisor interface, e.g VFIO/mdev. So this trys to use hypervisor module register/unregister interface for that. Hypervisor module needs to take care of module reference itself when working for hypervisor interface, e.g for VFIO/mdev, hypervisor module would reference counting mdev when open and release. This makes 'kvmgt' module really split from GVT device model. User needs to load 'kvmgt' to enable VFIO/mdev interface. v6: - remove unused variable v5: - put module reference in register error path v4: - fix checkpatch warning v3: - Fix module reference handling for device open and release. Unused mdev devices would be cleaned up in device unregister when module unload. v2: - Fix kvmgt order after i915 for built-in case Cc: "Yuan, Hang" Cc: Alex Williamson Cc: "He, Min" Reviewed-by: Yuan, Hang Acked-by: Joonas Lahtinen Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gvt/Makefile | 1 - drivers/gpu/drm/i915/gvt/gvt.c | 108 ++++++++++++++--------------------- drivers/gpu/drm/i915/gvt/gvt.h | 6 +- drivers/gpu/drm/i915/gvt/hypercall.h | 7 ++- drivers/gpu/drm/i915/gvt/kvmgt.c | 16 +++++- drivers/gpu/drm/i915/gvt/mpt.h | 3 + drivers/gpu/drm/i915/intel_gvt.c | 9 --- 8 files changed, 69 insertions(+), 82 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 19b5fe5016bf..63893fe00711 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -198,3 +198,4 @@ endif i915-y += intel_lpe_audio.o obj-$(CONFIG_DRM_I915) += i915.o +obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index b016dc753db9..271fb46d4dd0 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) -obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index a5b760b7bc10..4e8947f33bd0 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -187,52 +187,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { .write_protect_handler = intel_vgpu_page_track_handler, }; -/** - * intel_gvt_init_host - Load MPT modules and detect if we're running in host - * - * This function is called at the driver loading stage. If failed to find a - * loadable MPT module or detect currently we're running in a VM, then GVT-g - * will be disabled - * - * Returns: - * Zero on success, negative error code if failed. 
- * - */ -int intel_gvt_init_host(void) -{ - if (intel_gvt_host.initialized) - return 0; - - /* Xen DOM U */ - if (xen_domain() && !xen_initial_domain()) - return -ENODEV; - - /* Try to load MPT modules for hypervisors */ - if (xen_initial_domain()) { - /* In Xen dom0 */ - intel_gvt_host.mpt = try_then_request_module( - symbol_get(xengt_mpt), "xengt"); - intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN; - } else { -#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) - /* not in Xen. Try KVMGT */ - intel_gvt_host.mpt = try_then_request_module( - symbol_get(kvmgt_mpt), "kvmgt"); - intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; -#endif - } - - /* Fail to load MPT modules - bail out */ - if (!intel_gvt_host.mpt) - return -EINVAL; - - gvt_dbg_core("Running with hypervisor %s in host mode\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); - - intel_gvt_host.initialized = true; - return 0; -} - static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; @@ -316,7 +270,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) return; intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev); intel_gvt_cleanup_vgpu_type_groups(gvt); intel_gvt_clean_vgpu_types(gvt); @@ -352,13 +305,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) struct intel_vgpu *vgpu; int ret; - /* - * Cannot initialize GVT device without intel_gvt_host gets - * initialized first. - */ - if (WARN_ON(!intel_gvt_host.initialized)) - return -EINVAL; - if (WARN_ON(dev_priv->gvt)) return -EEXIST; @@ -420,13 +366,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_types; } - ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, - &intel_gvt_ops); - if (ret) { - gvt_err("failed to register gvt-g host device: %d\n", ret); - goto out_clean_types; - } - vgpu = intel_gvt_create_idle_vgpu(gvt); if (IS_ERR(vgpu)) { ret = PTR_ERR(vgpu); @@ -441,6 +380,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; + intel_gvt_host.dev = &dev_priv->drm.pdev->dev; + intel_gvt_host.initialized = true; return 0; out_clean_types: @@ -467,6 +408,45 @@ out_clean_idr: return ret; } -#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) -MODULE_SOFTDEP("pre: kvmgt"); -#endif +int +intel_gvt_register_hypervisor(struct intel_gvt_mpt *m) +{ + int ret; + void *gvt; + + if (!intel_gvt_host.initialized) + return -ENODEV; + + if (m->type != INTEL_GVT_HYPERVISOR_KVM && + m->type != INTEL_GVT_HYPERVISOR_XEN) + return -EINVAL; + + /* Get a reference for device model module */ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + intel_gvt_host.mpt = m; + intel_gvt_host.hypervisor_type = m->type; + gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; + + ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, + &intel_gvt_ops); + if (ret < 0) { + gvt_err("Failed to init %s hypervisor module\n", + supported_hypervisors[intel_gvt_host.hypervisor_type]); + module_put(THIS_MODULE); + return -ENODEV; + } + gvt_dbg_core("Running with hypervisor %s in host mode\n", + supported_hypervisors[intel_gvt_host.hypervisor_type]); + return 0; +} +EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); + +void +intel_gvt_unregister_hypervisor(void) +{ + intel_gvt_hypervisor_host_exit(intel_gvt_host.dev); + module_put(THIS_MODULE); +} +EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h 
b/drivers/gpu/drm/i915/gvt/gvt.h index b4ab1dad0143..8a4cf995d755 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -52,12 +52,8 @@ #define GVT_MAX_VGPU 8 -enum { - INTEL_GVT_HYPERVISOR_XEN = 0, - INTEL_GVT_HYPERVISOR_KVM, -}; - struct intel_gvt_host { + struct device *dev; bool initialized; int hypervisor_type; struct intel_gvt_mpt *mpt; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index e49a9247ed78..50798868ab15 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -33,11 +33,17 @@ #ifndef _GVT_HYPERCALL_H_ #define _GVT_HYPERCALL_H_ +enum hypervisor_type { + INTEL_GVT_HYPERVISOR_XEN = 0, + INTEL_GVT_HYPERVISOR_KVM, +}; + /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { + enum hypervisor_type type; int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev); int (*attach_vgpu)(void *vgpu, unsigned long *handle); @@ -67,6 +73,5 @@ struct intel_gvt_mpt { }; extern struct intel_gvt_mpt xengt_mpt; -extern struct intel_gvt_mpt kvmgt_mpt; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1bbd04d30c42..a19e684e621a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -627,6 +627,12 @@ static int intel_vgpu_open(struct mdev_device *mdev) goto undo_iommu; } + /* Take a module reference as mdev core doesn't take + * a reference for vendor driver. + */ + if (!try_module_get(THIS_MODULE)) + goto undo_group; + ret = kvmgt_guest_init(mdev); if (ret) goto undo_group; @@ -679,6 +685,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) &vgpu->vdev.group_notifier); WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); + /* dereference module reference taken at open */ + module_put(THIS_MODULE); + info = (struct kvmgt_guest_info *)vgpu->handle; kvmgt_guest_exit(info); @@ -1849,7 +1858,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) return ret; } -struct intel_gvt_mpt kvmgt_mpt = { +static struct intel_gvt_mpt kvmgt_mpt = { + .type = INTEL_GVT_HYPERVISOR_KVM, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, .attach_vgpu = kvmgt_attach_vgpu, @@ -1868,15 +1878,17 @@ struct intel_gvt_mpt kvmgt_mpt = { .put_vfio_device = kvmgt_put_vfio_device, .is_valid_gfn = kvmgt_is_valid_gfn, }; -EXPORT_SYMBOL_GPL(kvmgt_mpt); static int __init kvmgt_init(void) { + if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) + return -ENODEV; return 0; } static void __exit kvmgt_exit(void) { + intel_gvt_unregister_hypervisor(); } module_init(kvmgt_init); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index c95ef77da62c..9b4225d44243 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -360,4 +360,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } +int intel_gvt_register_hypervisor(struct intel_gvt_mpt *); +void intel_gvt_unregister_hypervisor(void); + #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index c22b3e18a0f5..d74e59e22c9d 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -105,15 +105,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 
-EIO; } - /* - * We're not in host or fail to find a MPT module, disable GVT-g - */ - ret = intel_gvt_init_host(); - if (ret) { - DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); - goto bail; - } - ret = intel_gvt_init_device(dev_priv); if (ret) { DRM_DEBUG_DRIVER("Fail to init GVT device\n"); -- cgit v1.2.3 From ba64bd96393414f29f0124ca8d6ca8b4b83d4902 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 8 Jan 2019 10:25:45 +0200 Subject: drm/i915/gvt: remove drmP.h include drmP.h is deprecated and no longer needed. Reviewed-by: Zhenyu Wang Signed-off-by: Jani Nikula Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 51ed99a37803..2eb681175fae 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -29,7 +29,6 @@ */ #include -#include #include #include "i915_drv.h" -- cgit v1.2.3 From ed8cce30163f7b39cbca9c214a17789e7d516ff3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 8 Jan 2019 16:12:04 +0200 Subject: drm/i915/gvt: give the cmd parser decode_info a const treatment It doesn't need to be changed, make it const. The string literals should anyway be referred to as const data. The following gets moved to rodata section: 0000000000000410 l O .rodata 0000000000000018 decode_info_mi 0000000000000390 l O .rodata 0000000000000018 decode_info_3d_media 00000000000003e0 l O .rodata 0000000000000018 decode_info_2d 0000000000000330 l O .rodata 0000000000000018 decode_info_mfx_vc 00000000000002e0 l O .rodata 0000000000000018 decode_info_vebox 0000000000000300 l O .rodata 0000000000000028 sub_op_vebox 0000000000000360 l O .rodata 0000000000000028 sub_op_mfx_vc 00000000000003c0 l O .rodata 0000000000000020 sub_op_3d_media 0000000000000400 l O .rodata 0000000000000010 sub_op_2d 0000000000000430 l O .rodata 0000000000000010 sub_op_mi Reviewed-by: Yan Zhao Signed-off-by: Jani Nikula Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 77ae634eb11c..98415d465a09 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -55,10 +55,10 @@ struct sub_op_bits { int low; }; struct decode_info { - char *name; + const char *name; int op_len; int nr_sub_op; - struct sub_op_bits *sub_op; + const struct sub_op_bits *sub_op; }; #define MAX_CMD_BUDGET 0x7fffffff @@ -485,12 +485,12 @@ struct parser_exec_state { static unsigned long bypass_scan_mask = 0; /* ring ALL, type = 0 */ -static struct sub_op_bits sub_op_mi[] = { +static const struct sub_op_bits sub_op_mi[] = { {31, 29}, {28, 23}, }; -static struct decode_info decode_info_mi = { +static const struct decode_info decode_info_mi = { "MI", OP_LEN_MI, ARRAY_SIZE(sub_op_mi), @@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = { }; /* ring RCS, command type 2 */ -static struct sub_op_bits sub_op_2d[] = { +static const struct sub_op_bits sub_op_2d[] = { {31, 29}, {28, 22}, }; -static struct decode_info decode_info_2d = { +static const struct decode_info decode_info_2d = { "2D", OP_LEN_2D, ARRAY_SIZE(sub_op_2d), @@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = { }; /* ring RCS, command type 3 */ -static struct sub_op_bits sub_op_3d_media[] = { +static const struct sub_op_bits 
sub_op_3d_media[] = { {31, 29}, {28, 27}, {26, 24}, {23, 16}, }; -static struct decode_info decode_info_3d_media = { +static const struct decode_info decode_info_3d_media = { "3D_Media", OP_LEN_3D_MEDIA, ARRAY_SIZE(sub_op_3d_media), @@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = { }; /* ring VCS, command type 3 */ -static struct sub_op_bits sub_op_mfx_vc[] = { +static const struct sub_op_bits sub_op_mfx_vc[] = { {31, 29}, {28, 27}, {26, 24}, @@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = { {20, 16}, }; -static struct decode_info decode_info_mfx_vc = { +static const struct decode_info decode_info_mfx_vc = { "MFX_VC", OP_LEN_MFX_VC, ARRAY_SIZE(sub_op_mfx_vc), @@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = { }; /* ring VECS, command type 3 */ -static struct sub_op_bits sub_op_vebox[] = { +static const struct sub_op_bits sub_op_vebox[] = { {31, 29}, {28, 27}, {26, 24}, @@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = { {20, 16}, }; -static struct decode_info decode_info_vebox = { +static const struct decode_info decode_info_vebox = { "VEBOX", OP_LEN_VEBOX, ARRAY_SIZE(sub_op_vebox), sub_op_vebox, }; -static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { +static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { [RCS] = { &decode_info_mi, NULL, @@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { static inline u32 get_opcode(u32 cmd, int ring_id) { - struct decode_info *d_info; + const struct decode_info *d_info; d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; if (d_info == NULL) @@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low) static inline void print_opcode(u32 cmd, int ring_id) { - struct decode_info *d_info; + const struct decode_info *d_info; int i; d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; -- cgit v1.2.3 From b007065a0aebfba859cecbc23271542b04784567 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 8 Jan 2019 16:12:05 +0200 Subject: drm/i915/gvt: give the cmd parser cmd_info a const treatment It doesn't need to be changed, make it const. The string literals should anyway be referred to as const data. 
The following gets moved to rodata section: 0000000000000080 l O .rodata 0000000000001c00 cmd_info Reviewed-by: Yan Zhao Signed-off-by: Jani Nikula Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/gvt/trace.h | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 98415d465a09..cae00e6debaf 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s); #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5)) struct cmd_info { - char *name; + const char *name; u32 opcode; #define F_LEN_MASK (1U<<0) @@ -425,7 +425,7 @@ struct cmd_info { struct cmd_entry { struct hlist_node hlist; - struct cmd_info *info; + const struct cmd_info *info; }; enum { @@ -474,7 +474,7 @@ struct parser_exec_state { int saved_buf_addr_type; bool is_ctx_wa; - struct cmd_info *info; + const struct cmd_info *info; struct intel_vgpu_workload *workload; }; @@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id) return cmd >> (32 - d_info->op_len); } -static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, +static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, int ring_id) { struct cmd_entry *e; @@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, return NULL; } -static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt, +static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt, u32 cmd, int ring_id) { u32 opcode; @@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s, return 0; } -static inline int get_cmd_length(struct cmd_info *info, u32 cmd) +static inline int get_cmd_length(const struct cmd_info *info, u32 cmd) { if ((info->flag & F_LEN_MASK) == F_LEN_CONST) return info->len; @@ -1643,7 +1643,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) { unsigned long gma = 0; - struct cmd_info *info; + const struct cmd_info *info; uint32_t cmd_len = 0; bool bb_end = false; struct intel_vgpu *vgpu = s->vgpu; @@ -1842,7 +1842,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) static int mi_noop_index; -static struct cmd_info cmd_info[] = { +static const struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL, @@ -2521,7 +2521,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) static int cmd_parser_exec(struct parser_exec_state *s) { struct intel_vgpu *vgpu = s->vgpu; - struct cmd_info *info; + const struct cmd_info *info; u32 cmd; int ret = 0; @@ -2895,10 +2895,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } -static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, +static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, unsigned int opcode, unsigned long rings) { - struct cmd_info *info = NULL; + const struct cmd_info *info = NULL; unsigned int ring; for_each_set_bit(ring, &rings, I915_NUM_ENGINES) { @@ -2913,7 +2913,7 @@ static int init_cmd_table(struct intel_gvt *gvt) { int i; struct cmd_entry *e; - struct cmd_info 
*info; + const struct cmd_info *info; unsigned int gen_type; gen_type = intel_gvt_get_device_type(gvt); diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 1fd64202d74e..6d787750d279 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync, TRACE_EVENT(gvt_command, TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, u32 buf_type, u32 buf_addr_type, - void *workload, char *cmd_name), + void *workload, const char *cmd_name), TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type, buf_addr_type, workload, cmd_name), -- cgit v1.2.3 From 36520ed005e71f6b8e2c23fa446b47d97410c173 Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:19:51 +0800 Subject: drm/i915/gvt: Add coffeelake platform definition Add D_CFL for CFL platform. Reviewed-by: Zhenyu Wang Signed-off-by: Fei Jiang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 2 ++ drivers/gpu/drm/i915/gvt/mmio.h | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..c1170f42b6a1 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_KBL; else if (IS_BROXTON(gvt->dev_priv)) return D_BXT; + else if (IS_COFFEELAKE(gvt->dev_priv)) + return D_CFL; return 0; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 1ffc69eba30e..5874f1cb4306 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -43,15 +43,16 @@ struct intel_vgpu; #define D_SKL (1 << 1) #define D_KBL (1 << 2) #define D_BXT (1 << 3) +#define D_CFL (1 << 4) -#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT) -#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT) +#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT | D_CFL) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL) -#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT) -#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT) +#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT | D_CFL) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL) #define D_PRE_SKL (D_BDW) -#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT) +#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL) typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *, unsigned int); -- cgit v1.2.3 From 5cd02703b0a497c502e8d8a8047f3a53bffacd20 Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:20:00 +0800 Subject: drm/i915/gvt: Add mmio handler for CFL Add registers of 0x4ab8 and 0x2248 into MMIO handler. 
Reviewed-by: Zhenyu Wang Signed-off-by: Fei Jiang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c1170f42b6a1..9910ba16d815 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3043,8 +3043,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x4ab8), D_KBL); - MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); + MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL); + MMIO_D(_MMIO(0x2248), D_SKL_PLUS); return 0; } -- cgit v1.2.3 From c3b5a8430daadf5b8ec9757d6c81149903cbe99f Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:20:07 +0800 Subject: drm/i915/gvt: Enable gfx virtualiztion for CFL Use INTEL_GEN to simplify the code for SKL+ platforms. v2: - split the enabling code into final one to identify any regression. Cc: Zhenyu Wang Reviewed-by: Zhenyu Wang Signed-off-by: Fei Jiang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 15 +++++---------- drivers/gpu/drm/i915/gvt/display.c | 12 ++++++++---- drivers/gpu/drm/i915/gvt/dmabuf.c | 4 +--- drivers/gpu/drm/i915/gvt/fb_decoder.c | 12 +++--------- drivers/gpu/drm/i915/gvt/handlers.c | 17 ++++++++--------- drivers/gpu/drm/i915/gvt/interrupt.c | 4 +--- drivers/gpu/drm/i915/gvt/mmio_context.c | 18 +++++++----------- drivers/gpu/drm/i915/gvt/scheduler.c | 7 +++---- 8 files changed, 36 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index cae00e6debaf..a04e8aa58547 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s, * It's good enough to support initializing mmio by lri command in * vgpu inhibit context on KBL. 
*/ - if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) && + if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv) + || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) && intel_gvt_mmio_is_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { intel_gvt_hypervisor_read_gpa(s->vgpu, @@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) return skl_decode_mi_display_flip(s, info); return -ENODEV; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index df1e14145747..4f25b6b7728e 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) { vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= @@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } - if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; } @@ -453,7 +455,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -476,7 +479,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv)) return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, resolution); else diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 2eb681175fae..3e7e2b80c857 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -163,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { unsigned 
int tiling_mode = 0; unsigned int stride = 0; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 481896fb712a..dbd91ef28886 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { switch (tiled) { case PLANE_CTL_TILED_LINEAR: stride = stride_reg * 64; @@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (!plane->enabled) return -ENODEV; - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { plane->tiled = val & PLANE_CTL_TILED_MASK; fmt = skl_format_to_drm( val & PLANE_CTL_FORMAT_MASK, @@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, } plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, - (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) ? + (INTEL_GEN(dev_priv) >= 9) ? (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 9910ba16d815..68a62ba5bf54 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -283,9 +283,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, old = vgpu_vreg(vgpu, offset); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); - if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv) - || IS_BROXTON(vgpu->gvt->dev_priv)) { + if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -891,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if ((IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv) - || IS_BROXTON(vgpu->gvt->dev_priv)) + if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; @@ -1409,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) { + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_COFFEELAKE(vgpu->gvt->dev_priv)) { /** * "Read memory latency" command on gen9. 
* Below memory latency values are read @@ -1433,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, break; case SKL_PCODE_CDCLK_CONTROL: if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_COFFEELAKE(vgpu->gvt->dev_priv)) *data0 = SKL_CDCLK_READY_FOR_CHANGE; break; case GEN6_PCODE_READ_RC6VIDS: @@ -3304,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (ret) goto err; } else if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv)) { + || IS_KABYLAKE(dev_priv) + || IS_COFFEELAKE(dev_priv)) { ret = init_broadwell_mmio_info(gvt); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 6b9d1354ff29..67125c5eec6e 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -581,9 +581,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv) - || IS_BROXTON(gvt->dev_priv)) { + } else if (INTEL_GEN(gvt->dev_priv) >= 9) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 36a5147cd01e..893de7267b1f 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -351,8 +351,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || - IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))) + if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9)) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -389,7 +388,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) + || IS_COFFEELAKE(dev_priv)) && ring_id == RCS) return; if (!pre && !gen9_render_mocs.initialized) @@ -455,9 +455,7 @@ static void switch_mmio(struct intel_vgpu *pre, u32 old_v, new_v; dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_BROXTON(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) switch_mocs(pre, next, ring_id); for (mmio = dev_priv->gvt->engine_mmio_list.mmio; @@ -469,8 +467,8 @@ static void switch_mmio(struct intel_vgpu *pre, * state image on kabylake, it's initialized by lri command and * save or restore with context together. 
*/ - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - && mmio->in_context) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) + || IS_COFFEELAKE(dev_priv)) && mmio->in_context) continue; // save @@ -563,9 +561,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; - if (IS_SKYLAKE(gvt->dev_priv) || - IS_KABYLAKE(gvt->dev_priv) || - IS_BROXTON(gvt->dev_priv)) + if (INTEL_GEN(gvt->dev_priv) >= 9) gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; else gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1ad8c5e1455d..fb7445baa139 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) void *shadow_ring_buffer_va; u32 *cs; - if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)) + if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915) + || IS_COFFEELAKE(req->i915)) && is_inhibit_context(req->hw_context)) intel_vgpu_restore_inhibit_context(vgpu, req); @@ -939,9 +940,7 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; - bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv) - || IS_BROXTON(gvt->dev_priv); + bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); -- cgit v1.2.3 From 360f864e43aaf92f541838b181f21d82c624063a Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:20:14 +0800 Subject: drm/i915/gvt: Reuse the gmbus pin macro Reuse the gmbus pin macro from i915_reg.h file to improve readablity. Reviewed-by: Zhenyu Wang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/edid.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 5d4bb35bb889..752aa0fd1cc9 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -82,11 +82,11 @@ static inline int bxt_get_port_from_gmbus0(u32 gmbus0) int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; - if (port_select == 1) + if (port_select == GMBUS_PIN_1_BXT) port = PORT_B; - else if (port_select == 2) + else if (port_select == GMBUS_PIN_2_BXT) port = PORT_C; - else if (port_select == 3) + else if (port_select == GMBUS_PIN_3_BXT) port = PORT_D; return port; } @@ -96,13 +96,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0) int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; - if (port_select == 2) + if (port_select == GMBUS_PIN_VGADDC) port = PORT_E; - else if (port_select == 4) + else if (port_select == GMBUS_PIN_DPC) port = PORT_C; - else if (port_select == 5) + else if (port_select == GMBUS_PIN_DPB) port = PORT_B; - else if (port_select == 6) + else if (port_select == GMBUS_PIN_DPD) port = PORT_D; return port; } -- cgit v1.2.3 From 5807bb4d8dd640685313ad67218b2fe0770d1d03 Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:21:14 +0800 Subject: drm/i915/gvt: Refine port select logic for CFL platform Refine the code since the port select definition for CFL is different than SKL/BXT. v2: - replace PCH_CNP with IS_COFFEELAKE. 
(zhenyu) Cc: Zhenyu Wang Reviewed-by: Zhenyu Wang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/edid.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 752aa0fd1cc9..1fe6124918f1 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -77,6 +77,22 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) return chr; } +static inline int cnp_get_port_from_gmbus0(u32 gmbus0) +{ + int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; + int port = -EINVAL; + + if (port_select == GMBUS_PIN_1_BXT) + port = PORT_B; + else if (port_select == GMBUS_PIN_2_BXT) + port = PORT_C; + else if (port_select == GMBUS_PIN_3_BXT) + port = PORT_D; + else if (port_select == GMBUS_PIN_4_CNP) + port = PORT_E; + return port; +} + static inline int bxt_get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; @@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (IS_BROXTON(dev_priv)) port = bxt_get_port_from_gmbus0(pin_select); + else if (IS_COFFEELAKE(dev_priv)) + port = cnp_get_port_from_gmbus0(pin_select); else port = get_port_from_gmbus0(pin_select); if (WARN_ON(port < 0)) -- cgit v1.2.3 From 6c46c2e8c589a85501c3816d15264f3afcc9e023 Mon Sep 17 00:00:00 2001 From: fred gao Date: Wed, 9 Jan 2019 09:21:23 +0800 Subject: drm/i915: Enable gfx virtualization for Coffeelake platform Enable gfx virtualization for CFL. Reviewed-by: Zhenyu Wang Signed-off-by: fred gao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/intel_gvt.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index d74e59e22c9d..1d7d26e4cf14 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -49,6 +49,9 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_BROXTON(dev_priv)) return true; + if (IS_COFFEELAKE(dev_priv)) + return true; + return false; } -- cgit v1.2.3 From bd780f37a3617d3dda74b97013ae8aa9b07a1d91 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:09 +0000 Subject: drm/i915: Track all held rpm wakerefs Everytime we take a wakeref, record the stack trace of where it was taken; clearing the set if we ever drop back to no owners. For debugging a rpm leak, we can look at all the current wakerefs and check if they have a matching rpm_put. v2: Use skip=0 for unwinding the stack as it appears our noinline function doesn't appear on the stack (nor does save_stack_trace itself!) v3: Allow rpm->debug_count to disappear between inspections and so avoid calling krealloc(0) as that may return a ZERO_PTR not NULL! 
(Mika) v4: Show who last acquire/released the runtime pm Signed-off-by: Chris Wilson Cc: Jani Nikula Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Tested-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 3 +- drivers/gpu/drm/i915/i915_debugfs.c | 6 + drivers/gpu/drm/i915/i915_drv.c | 8 +- drivers/gpu/drm/i915/i915_drv.h | 20 ++ drivers/gpu/drm/i915/intel_drv.h | 44 ++-- drivers/gpu/drm/i915/intel_runtime_pm.c | 285 ++++++++++++++++++++--- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 8 +- 7 files changed, 324 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 9e36ffb5eb7c..ad4d71161dda 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -21,11 +21,11 @@ config DRM_I915_DEBUG select DEBUG_FS select PREEMPT_COUNT select I2C_CHARDEV + select STACKDEPOT select DRM_DP_AUX_CHARDEV select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y - select STACKDEPOT if DRM=y # for DRM_DEBUG_MM select DRM_DEBUG_SELFTEST select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM bool "Enable extra state checking for runtime PM" depends on DRM_I915 default n + select STACKDEPOT help Choose this option to turn on extra state checking for the runtime PM functionality. This may introduce overhead during diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 95813e21ae02..050cf8abd426 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2702,6 +2702,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) pci_power_name(pdev->current_state), pdev->current_state); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) { + struct drm_printer p = drm_seq_file_printer(m); + + print_intel_runtime_pm_wakeref(dev_priv, &p); + } + return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 75652dc1e24c..5731f992cf44 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -905,6 +905,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->pps_mutex); i915_memcpy_init_early(dev_priv); + intel_runtime_pm_init_early(dev_priv); ret = i915_workqueues_init(dev_priv); if (ret < 0) @@ -1807,8 +1808,7 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_cleanup_mmio(dev_priv); enable_rpm_wakeref_asserts(dev_priv); - - WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count)); + intel_runtime_pm_cleanup(dev_priv); } static void i915_driver_release(struct drm_device *dev) @@ -2010,6 +2010,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) out: enable_rpm_wakeref_asserts(dev_priv); + if (!dev_priv->uncore.user_forcewake.count) + intel_runtime_pm_cleanup(dev_priv); return ret; } @@ -2965,7 +2967,7 @@ static int intel_runtime_suspend(struct device *kdev) } enable_rpm_wakeref_asserts(dev_priv); - WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); + intel_runtime_pm_cleanup(dev_priv); if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h 
index 5df26ccda8a4..7e3566a0ba72 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include /* for struct drm_dma_handle */ @@ -1156,6 +1157,25 @@ struct i915_runtime_pm { atomic_t wakeref_count; bool suspended; bool irqs_enabled; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + /* + * To aide detection of wakeref leaks and general misuse, we + * track all wakeref holders. With manual markup (i.e. returning + * a cookie to each rpm_get caller which they then supply to their + * paired rpm_put) we can remove corresponding pairs of and keep + * the array trimmed to active wakerefs. + */ + struct intel_runtime_pm_debug { + spinlock_t lock; + + depot_stack_handle_t last_acquire; + depot_stack_handle_t last_release; + + depot_stack_handle_t *owners; + unsigned long count; + } debug; +#endif }; enum intel_pipe_crc_source { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1a11c2beb7f3..ac513fd70315 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,6 +41,8 @@ #include #include +struct drm_printer; + /** * __wait_for - magic wait macro * @@ -2084,6 +2086,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); void intel_init_quirks(struct drm_i915_private *dev_priv); /* intel_runtime_pm.c */ +void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv); int intel_power_domains_init(struct drm_i915_private *); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); @@ -2106,6 +2109,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); +void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv); const char * intel_display_power_domain_str(enum intel_display_power_domain domain); @@ -2123,23 +2127,23 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices); static inline void -assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) +assert_rpm_device_not_suspended(struct drm_i915_private *i915) { - WARN_ONCE(dev_priv->runtime_pm.suspended, + WARN_ONCE(i915->runtime_pm.suspended, "Device suspended during HW access\n"); } static inline void -assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) +assert_rpm_wakelock_held(struct drm_i915_private *i915) { - assert_rpm_device_not_suspended(dev_priv); - WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count), + assert_rpm_device_not_suspended(i915); + WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count), "RPM wakelock ref not held during HW access"); } /** * disable_rpm_wakeref_asserts - disable the RPM assert checks - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function disable asserts that check if we hold an RPM wakelock * reference, while keeping the device-not-suspended checks still enabled. @@ -2156,14 +2160,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) * enable_rpm_wakeref_asserts(). 
*/ static inline void -disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +disable_rpm_wakeref_asserts(struct drm_i915_private *i915) { - atomic_inc(&dev_priv->runtime_pm.wakeref_count); + atomic_inc(&i915->runtime_pm.wakeref_count); } /** * enable_rpm_wakeref_asserts - re-enable the RPM assert checks - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function re-enables the RPM assert checks after disabling them with * disable_rpm_wakeref_asserts. It's meant to be used only in special @@ -2173,15 +2177,25 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) * disable_rpm_wakeref_asserts(). */ static inline void -enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) +enable_rpm_wakeref_asserts(struct drm_i915_private *i915) { - atomic_dec(&dev_priv->runtime_pm.wakeref_count); + atomic_dec(&i915->runtime_pm.wakeref_count); } -void intel_runtime_pm_get(struct drm_i915_private *dev_priv); -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); -void intel_runtime_pm_put(struct drm_i915_private *dev_priv); +void intel_runtime_pm_get(struct drm_i915_private *i915); +bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); +void intel_runtime_pm_get_noresume(struct drm_i915_private *i915); +void intel_runtime_pm_put(struct drm_i915_private *i915); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p); +#else +static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p) +{ +} +#endif void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 9e9501f82f06..08f809371bbd 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -29,6 +29,8 @@ #include #include +#include + #include "i915_drv.h" #include "intel_drv.h" @@ -49,6 +51,218 @@ * present for a given platform. 
*/ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +#include + +#define STACKDEPTH 8 + +static noinline depot_stack_handle_t __save_depot_stack(void) +{ + unsigned long entries[STACKDEPTH]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + .skip = 1, + }; + + save_stack_trace(&trace); + if (trace.nr_entries && + trace.entries[trace.nr_entries - 1] == ULONG_MAX) + trace.nr_entries--; + + return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN); +} + +static void __print_depot_stack(depot_stack_handle_t stack, + char *buf, int sz, int indent) +{ + unsigned long entries[STACKDEPTH]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + }; + + depot_fetch_stack(stack, &trace); + snprint_stack_trace(buf, sz, &trace, indent); +} + +static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + + spin_lock_init(&rpm->debug.lock); +} + +static noinline void +track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + depot_stack_handle_t stack, *stacks; + unsigned long flags; + + atomic_inc(&rpm->wakeref_count); + assert_rpm_wakelock_held(i915); + + if (!HAS_RUNTIME_PM(i915)) + return; + + stack = __save_depot_stack(); + if (!stack) + return; + + spin_lock_irqsave(&rpm->debug.lock, flags); + + if (!rpm->debug.count) + rpm->debug.last_acquire = stack; + + stacks = krealloc(rpm->debug.owners, + (rpm->debug.count + 1) * sizeof(*stacks), + GFP_NOWAIT | __GFP_NOWARN); + if (stacks) { + stacks[rpm->debug.count++] = stack; + rpm->debug.owners = stacks; + } + + spin_unlock_irqrestore(&rpm->debug.lock, flags); +} + +static int cmphandle(const void *_a, const void *_b) +{ + const depot_stack_handle_t * const a = _a, * const b = _b; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static void +__print_intel_runtime_pm_wakeref(struct drm_printer *p, + const struct intel_runtime_pm_debug *dbg) +{ + unsigned long i; + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (dbg->last_acquire) { + __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); + drm_printf(p, "Wakeref last acquired:\n%s", buf); + } + + if (dbg->last_release) { + __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); + drm_printf(p, "Wakeref last released:\n%s", buf); + } + + drm_printf(p, "Wakeref count: %lu\n", dbg->count); + + sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL); + + for (i = 0; i < dbg->count; i++) { + depot_stack_handle_t stack = dbg->owners[i]; + unsigned long rep; + + rep = 1; + while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) + rep++, i++; + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); + } + + kfree(buf); +} + +static noinline void +untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm_debug dbg = {}; + struct drm_printer p; + unsigned long flags; + + assert_rpm_wakelock_held(i915); + if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count, + &rpm->debug.lock, + flags)) { + dbg = rpm->debug; + + rpm->debug.owners = NULL; + rpm->debug.count = 0; + rpm->debug.last_release = __save_depot_stack(); + + spin_unlock_irqrestore(&rpm->debug.lock, flags); + } + if (!dbg.count) + return; + + p = drm_debug_printer("i915"); + __print_intel_runtime_pm_wakeref(&p, &dbg); + + 
kfree(dbg.owners); +} + +void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + struct drm_printer *p) +{ + struct intel_runtime_pm_debug dbg = {}; + + do { + struct i915_runtime_pm *rpm = &i915->runtime_pm; + unsigned long alloc = dbg.count; + depot_stack_handle_t *s; + + spin_lock_irq(&rpm->debug.lock); + dbg.count = rpm->debug.count; + if (dbg.count <= alloc) { + memcpy(dbg.owners, + rpm->debug.owners, + dbg.count * sizeof(*s)); + } + dbg.last_acquire = rpm->debug.last_acquire; + dbg.last_release = rpm->debug.last_release; + spin_unlock_irq(&rpm->debug.lock); + if (dbg.count <= alloc) + break; + + s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL); + if (!s) + goto out; + + dbg.owners = s; + } while (1); + + __print_intel_runtime_pm_wakeref(p, &dbg); + +out: + kfree(dbg.owners); +} + +#else + +static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ +} + +static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + atomic_inc(&i915->runtime_pm.wakeref_count); + assert_rpm_wakelock_held(i915); +} + +static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +{ + assert_rpm_wakelock_held(i915); + atomic_dec(&i915->runtime_pm.wakeref_count); +} + +#endif + bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -3986,7 +4200,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) /** * intel_runtime_pm_get - grab a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on) and ensures that it is powered up. @@ -3994,22 +4208,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. */ -void intel_runtime_pm_get(struct drm_i915_private *dev_priv) +void intel_runtime_pm_get(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; int ret; ret = pm_runtime_get_sync(kdev); WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); - atomic_inc(&dev_priv->runtime_pm.wakeref_count); - assert_rpm_wakelock_held(dev_priv); + track_intel_runtime_pm_wakeref(i915); } /** * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference if the device is * already in use and ensures that it is powered up. It is illegal to try @@ -4020,10 +4233,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) * * Returns: True if the wakeref was acquired, or False otherwise. 
*/ -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) +bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { if (IS_ENABLED(CONFIG_PM)) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* @@ -4036,15 +4249,14 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) return false; } - atomic_inc(&dev_priv->runtime_pm.wakeref_count); - assert_rpm_wakelock_held(dev_priv); + track_intel_runtime_pm_wakeref(i915); return true; } /** * intel_runtime_pm_get_noresume - grab a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on). @@ -4059,32 +4271,31 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. */ -void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) +void intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(i915); pm_runtime_get_noresume(kdev); - atomic_inc(&dev_priv->runtime_pm.wakeref_count); + track_intel_runtime_pm_wakeref(i915); } /** * intel_runtime_pm_put - release a runtime pm reference - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding * hardware block right away if this is the last reference. */ -void intel_runtime_pm_put(struct drm_i915_private *dev_priv) +void intel_runtime_pm_put(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; - assert_rpm_wakelock_held(dev_priv); - atomic_dec(&dev_priv->runtime_pm.wakeref_count); + untrack_intel_runtime_pm_wakeref(i915); pm_runtime_mark_last_busy(kdev); pm_runtime_put_autosuspend(kdev); @@ -4092,7 +4303,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) /** * intel_runtime_pm_enable - enable runtime pm - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function enables runtime pm at the end of the driver load sequence. * @@ -4100,9 +4311,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) * subordinate display power domains. That is done by * intel_power_domains_enable(). */ -void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) +void intel_runtime_pm_enable(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* @@ -4124,7 +4335,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) * so the driver's own RPM reference tracking asserts also work on * platforms without RPM support. 
*/ - if (!HAS_RUNTIME_PM(dev_priv)) { + if (!HAS_RUNTIME_PM(i915)) { int ret; pm_runtime_dont_use_autosuspend(kdev); @@ -4142,17 +4353,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) pm_runtime_put_autosuspend(kdev); } -void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) +void intel_runtime_pm_disable(struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; /* Transfer rpm ownership back to core */ - WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0, + WARN(pm_runtime_get_sync(kdev) < 0, "Failed to pass rpm ownership back to core\n"); pm_runtime_dont_use_autosuspend(kdev); - if (!HAS_RUNTIME_PM(dev_priv)) + if (!HAS_RUNTIME_PM(i915)) pm_runtime_put(kdev); } + +void intel_runtime_pm_cleanup(struct drm_i915_private *i915) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + int count; + + count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */ + WARN(count, + "i915->runtime_pm.wakeref_count=%d on cleanup\n", + count); + + untrack_intel_runtime_pm_wakeref(i915); +} + +void intel_runtime_pm_init_early(struct drm_i915_private *i915) +{ + init_intel_runtime_pm_wakeref(i915); +} diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index baa3c38919de..082809569681 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -154,15 +154,17 @@ struct drm_i915_private *mock_gem_device(void) pdev->dev.archdata.iommu = (void *)-1; #endif + i915 = (struct drm_i915_private *)(pdev + 1); + pci_set_drvdata(pdev, i915); + + intel_runtime_pm_init_early(i915); + dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - i915 = (struct drm_i915_private *)(pdev + 1); - pci_set_drvdata(pdev, i915); - err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); if (err) { pr_err("Failed to initialise mock GEM device: err=%d\n", err); -- cgit v1.2.3 From 16e4dd0342a804090fd0958bb271d3a6b57056ac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:10 +0000 Subject: drm/i915: Markup paired operations on wakerefs The majority of runtime-pm operations are bounded and scoped within a function; these are easy to verify that the wakeref are handled correctly. We can employ the compiler to help us, and reduce the number of wakerefs tracked when debugging, by passing around cookies provided by the various rpm_get functions to their rpm_put counterpart. This makes the pairing explicit, and given the required wakeref cookie the compiler can verify that we pass an initialised value to the rpm_put (quite handy for double checking error paths). For regular builds, the compiler should be able to eliminate the unused local variables and the program growth should be minimal. Fwiw, it came out as a net improvement as gcc was able to refactor rpm_get and rpm_get_if_in_use together, v2: Just s/rpm_put/rpm_put_unchecked/ everywhere, leaving the manual mark up for smaller more targeted patches. 
v3: Mention the cookie in Returns Signed-off-by: Chris Wilson Cc: Jani Nikula Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/aperture_gm.c | 8 +- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/sched_policy.c | 2 +- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 54 ++++++------ drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem.c | 20 ++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +- drivers/gpu/drm/i915/i915_gem_shrinker.c | 10 +-- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/i915_perf.c | 4 +- drivers/gpu/drm/i915/i915_pmu.c | 6 +- drivers/gpu/drm/i915/i915_sysfs.c | 12 +-- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 15 +++- drivers/gpu/drm/i915/intel_engine_cs.c | 4 +- drivers/gpu/drm/i915/intel_fbdev.c | 4 +- drivers/gpu/drm/i915/intel_guc_log.c | 6 +- drivers/gpu/drm/i915/intel_hotplug.c | 2 +- drivers/gpu/drm/i915/intel_huc.c | 2 +- drivers/gpu/drm/i915/intel_panel.c | 2 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 97 ++++++++++++++++++---- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/selftests/huge_pages.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem.c | 10 +-- .../gpu/drm/i915/selftests/i915_gem_coherency.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 10 +-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 6 +- drivers/gpu/drm/i915/selftests/i915_request.c | 8 +- drivers/gpu/drm/i915/selftests/intel_guc.c | 4 +- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 6 +- drivers/gpu/drm/i915/selftests/intel_lrc.c | 10 +-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 10 +-- 37 files changed, 209 insertions(+), 139 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 359d37d5c958..1fa2f65c3cd1 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) @@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -219,7 +219,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return -ENOSPC; } @@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b4ab1dad0143..435c746c3f73 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -597,7 +597,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) static inline 
void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index c32e7d5e8629..f04b3b965bfc 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1ad8c5e1455d..3816dcae2185 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -997,7 +997,7 @@ complete: intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(gvt->dev_priv); + intel_runtime_pm_put_unchecked(gvt->dev_priv); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } @@ -1451,7 +1451,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } if (ret && (vgpu_is_vm_unhealthy(ret))) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 050cf8abd426..6818079669a7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -877,7 +877,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -953,7 +953,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) intel_runtime_pm_get(i915); gpu = i915_capture_gpu_state(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -1226,7 +1226,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret; } @@ -1292,7 +1292,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", @@ -1579,7 +1579,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) else err = ironlake_drpc_info(m); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return err; } @@ -1632,7 +1632,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -1695,7 +1695,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -1723,7 +1723,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); @@ -1756,7 +1756,7 @@ static int i915_emon_status(struct seq_file *m, void *unused) seq_printf(m, "GFX power: %ld\n", gfx); seq_printf(m, "Total power: %ld\n", chipset + gfx); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -1805,7 +1805,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) mutex_unlock(&dev_priv->pcu_lock); out: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret; } @@ -2017,7 +2017,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -2067,7 +2067,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) act_freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } seq_printf(m, "RPS enabled? %d\n", rps->enabled); @@ -2160,7 +2160,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -2192,7 +2192,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) for (i = 0; i < 16; i++) seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -2601,7 +2601,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) dev_priv->psr.last_exit); } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -2632,7 +2632,7 @@ retry: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret; } @@ -2665,7 +2665,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return -ENODEV; } @@ -2673,7 +2673,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) power = I915_READ(MCH_SECP_NRG_STTS); power = (1000000 * power) >> units; /* convert to uJ */ - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); seq_printf(m, "%llu", power); @@ -2775,7 +2775,7 @@ out: seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -3114,7 +3114,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -3139,7 +3139,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -3265,7 +3265,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user 
*ubuf, dev_priv->wm.distrust_bios_wm = true; dev_priv->ipc_enabled = enable; intel_enable_ipc(dev_priv); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return len; } @@ -4090,7 +4090,7 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_drain_freed_objects(i915); out: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return ret; } @@ -4112,7 +4112,7 @@ i915_cache_sharing_get(void *data, u64 *val) snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -4140,7 +4140,7 @@ i915_cache_sharing_set(void *data, u64 val) snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return 0; } @@ -4388,7 +4388,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) gen10_sseu_device_status(dev_priv, &sseu); } - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); i915_print_sseu_info(m, false, &sseu); @@ -4416,7 +4416,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) return 0; intel_uncore_forcewake_user_put(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7e3566a0ba72..e9c909c43759 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -131,6 +131,8 @@ bool i915_error_injected(void); __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) +typedef depot_stack_handle_t intel_wakeref_t; + enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 74710e5d946e..640e6361dda3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -175,7 +175,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return i915->gt.epoch; } @@ -814,7 +814,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } static void @@ -1149,7 +1149,7 @@ out_unpin: i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1356,7 +1356,7 @@ out_unpin: i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1968,7 +1968,7 @@ err_unpin: err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -2068,7 +2068,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) @@ -4765,7 +4765,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (on) cond_resched(); } - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static void 
i915_gem_flush_free_objects(struct drm_i915_private *i915) @@ -4901,7 +4901,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_engines_sanitize(i915, false); intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); @@ -4965,12 +4965,12 @@ int i915_gem_suspend(struct drm_i915_private *i915) if (WARN_ON(!intel_engines_are_idle(i915))) i915_gem_set_wedged(i915); /* no hope, discard everything */ - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return 0; err_unlock: mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e7994505d850..c80943698ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -2424,7 +2424,7 @@ err_vma: eb_release_vmas(&eb); mutex_unlock(&dev->struct_mutex); err_rpm: - intel_runtime_pm_put(eb.i915); + intel_runtime_pm_put_unchecked(eb.i915); i915_gem_context_put(eb.ctx); err_destroy: eb_destroy(&eb); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d67c07cdd0b8..b3391070acf7 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -258,7 +258,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, */ if (intel_runtime_pm_get_if_in_use(fence->i915)) { fence_write(fence, vma); - intel_runtime_pm_put(fence->i915); + intel_runtime_pm_put_unchecked(fence->i915); } if (vma) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a8807fbed0aa..51f80ddd938d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2536,7 +2536,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, intel_runtime_pm_get(i915); vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2556,7 +2556,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma) intel_runtime_pm_get(i915); vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static int aliasing_gtt_bind_vma(struct i915_vma *vma, @@ -2590,7 +2590,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_GLOBAL_BIND) { intel_runtime_pm_get(i915); vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } return 0; @@ -2603,7 +2603,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) if (vma->flags & I915_VMA_GLOBAL_BIND) { intel_runtime_pm_get(i915); vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } if (vma->flags & I915_VMA_LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 6cc2b964c955..2bef02d0883d 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -265,7 +265,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (flags & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); i915_retire_requests(i915); @@ -299,7 +299,7 @@ unsigned long 
i915_gem_shrink_all(struct drm_i915_private *i915) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return freed; } @@ -377,7 +377,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } shrinker_unlock(i915, unlock); @@ -397,7 +397,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) freed_pages = i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -451,7 +451,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_VMAPS); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); /* We also want to clear any cached iomaps as they wrap vmap */ list_for_each_entry_safe(vma, next, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 288b0662f7b7..787a9ed1ef7d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3374,7 +3374,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, wake_up_all(&dev_priv->gpu_error.reset_queue); out: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } /* Called from drm generic code, passed 'crtc' which diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 5b1ae5ed97b3..e4dfd1477c78 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -2123,7 +2123,7 @@ err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); err_config: if (stream->ctx) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..c99fcfce79d5 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -210,7 +210,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) if (fw) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } static void @@ -231,7 +231,7 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) intel_runtime_pm_get_if_in_use(dev_priv)) { val = intel_get_cagf(dev_priv, I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], @@ -448,7 +448,7 @@ static u64 get_rc6(struct drm_i915_private *i915) if (intel_runtime_pm_get_if_in_use(i915)) { val = __get_rc6(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); /* * If we are coming back from being runtime suspended we must diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 
c0cfe7ae2ba5..53c20e103d56 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -46,7 +46,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, intel_runtime_pm_get(dev_priv); res = intel_rc6_residency_us(dev_priv, reg); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return DIV_ROUND_CLOSEST_ULL(res, 1000); } @@ -274,7 +274,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, } mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return snprintf(buf, PAGE_SIZE, "%d\n", ret); } @@ -371,7 +371,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val > rps->max_freq || val < rps->min_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return -EINVAL; } @@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret ?: count; } @@ -429,7 +429,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val > rps->max_freq || val > rps->max_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return -EINVAL; } @@ -446,7 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret ?: count; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1cc441f06c73..a980d5d1e601 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2101,7 +2101,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return vma; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ac513fd70315..a1e4e1033289 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "i915_drv.h" #include @@ -2182,10 +2183,16 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *i915) atomic_dec(&i915->runtime_pm.wakeref_count); } -void intel_runtime_pm_get(struct drm_i915_private *i915); -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); -void intel_runtime_pm_get_noresume(struct drm_i915_private *i915); -void intel_runtime_pm_put(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); + +void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); +#else +#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915) +#endif #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 5990f8500bca..2e60463f2468 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ 
b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -928,7 +928,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) idle = false; - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return idle; } @@ -1485,7 +1485,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, if (intel_runtime_pm_get_if_in_use(engine->i915)) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(engine->i915); + intel_runtime_pm_put_unchecked(engine->i915); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index a0c5046e170c..215e5894842d 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -276,7 +276,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index d3ebdbc0182e..1b1581a42aa1 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -445,7 +445,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log) */ intel_runtime_pm_get(dev_priv); guc_action_flush_log_complete(guc); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } int intel_guc_log_create(struct intel_guc_log *log) @@ -528,7 +528,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), GUC_LOG_LEVEL_IS_ENABLED(level), GUC_LOG_LEVEL_TO_VERBOSITY(level)); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; @@ -610,7 +610,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) intel_runtime_pm_get(i915); guc_action_flush_log(guc); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index ae92d6560165..b1a9cb960ca4 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -261,7 +261,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); } bool intel_encoder_hotplug(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index bc27b691d824..c2b076e9bada 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -122,7 +122,7 @@ int intel_huc_check_status(struct intel_huc *huc) intel_runtime_pm_get(dev_priv); status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return status; } diff --git 
a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index ee3e0842d542..c2b7455a023e 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1213,7 +1213,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); drm_modeset_unlock(&dev->mode_config.connection_mutex); - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 08f809371bbd..c29577d7a35a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -94,7 +94,7 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) spin_lock_init(&rpm->debug.lock); } -static noinline void +static noinline depot_stack_handle_t track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) { struct i915_runtime_pm *rpm = &i915->runtime_pm; @@ -105,11 +105,11 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) assert_rpm_wakelock_held(i915); if (!HAS_RUNTIME_PM(i915)) - return; + return -1; stack = __save_depot_stack(); if (!stack) - return; + return -1; spin_lock_irqsave(&rpm->debug.lock, flags); @@ -122,9 +122,57 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) if (stacks) { stacks[rpm->debug.count++] = stack; rpm->debug.owners = stacks; + } else { + stack = -1; } spin_unlock_irqrestore(&rpm->debug.lock, flags); + + return stack; +} + +static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915, + depot_stack_handle_t stack) +{ + struct i915_runtime_pm *rpm = &i915->runtime_pm; + unsigned long flags, n; + bool found = false; + + if (unlikely(stack == -1)) + return; + + spin_lock_irqsave(&rpm->debug.lock, flags); + for (n = rpm->debug.count; n--; ) { + if (rpm->debug.owners[n] == stack) { + memmove(rpm->debug.owners + n, + rpm->debug.owners + n + 1, + (--rpm->debug.count - n) * sizeof(stack)); + found = true; + break; + } + } + spin_unlock_irqrestore(&rpm->debug.lock, flags); + + if (WARN(!found, + "Unmatched wakeref (tracking %lu), count %u\n", + rpm->debug.count, atomic_read(&rpm->wakeref_count))) { + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); + + stack = READ_ONCE(rpm->debug.last_release); + if (stack) { + __print_depot_stack(stack, buf, PAGE_SIZE, 2); + DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); + } + + kfree(buf); + } } static int cmphandle(const void *_a, const void *_b) @@ -249,10 +297,12 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) { } -static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +static depot_stack_handle_t +track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) { atomic_inc(&i915->runtime_pm.wakeref_count); assert_rpm_wakelock_held(i915); + return -1; } static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915) @@ -1852,7 +1902,7 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); if (!is_enabled) - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return is_enabled; } @@ -1886,7 +1936,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - intel_runtime_pm_put(dev_priv); + 
intel_runtime_pm_put_unchecked(dev_priv); } #define I830_PIPES_POWER_DOMAINS ( \ @@ -3994,7 +4044,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) { /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); /* Remove the refcount we took to keep power well support disabled. */ if (!i915_modparams.disable_power_well) @@ -4207,8 +4257,10 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -void intel_runtime_pm_get(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) { struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; @@ -4217,7 +4269,7 @@ void intel_runtime_pm_get(struct drm_i915_private *i915) ret = pm_runtime_get_sync(kdev); WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); - track_intel_runtime_pm_wakeref(i915); + return track_intel_runtime_pm_wakeref(i915); } /** @@ -4231,9 +4283,10 @@ void intel_runtime_pm_get(struct drm_i915_private *i915) * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. * - * Returns: True if the wakeref was acquired, or False otherwise. + * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates + * as True if the wakeref was acquired, or False otherwise. */ -bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { if (IS_ENABLED(CONFIG_PM)) { struct pci_dev *pdev = i915->drm.pdev; @@ -4246,12 +4299,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) * atm to the late/early system suspend/resume handlers. */ if (pm_runtime_get_if_in_use(kdev) <= 0) - return false; + return 0; } - track_intel_runtime_pm_wakeref(i915); - - return true; + return track_intel_runtime_pm_wakeref(i915); } /** @@ -4270,8 +4321,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -void intel_runtime_pm_get_noresume(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; @@ -4279,7 +4332,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915) assert_rpm_wakelock_held(i915); pm_runtime_get_noresume(kdev); - track_intel_runtime_pm_wakeref(i915); + return track_intel_runtime_pm_wakeref(i915); } /** @@ -4290,7 +4343,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915) * intel_runtime_pm_get() and might power down the corresponding * hardware block right away if this is the last reference. 
*/ -void intel_runtime_pm_put(struct drm_i915_private *i915) +void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) { struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; @@ -4301,6 +4354,14 @@ void intel_runtime_pm_put(struct drm_i915_private *i915) pm_runtime_put_autosuspend(kdev); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +{ + cancel_intel_runtime_pm_wakeref(i915, wref); + intel_runtime_pm_put_unchecked(i915); +} +#endif + /** * intel_runtime_pm_enable - enable runtime pm * @i915: i915 device instance diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index fff468f17d2d..8d4c76ac0e7d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1709,7 +1709,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, reg->val = I915_READ8(entry->offset_ldw); else ret = -EINVAL; - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 6c10734e948d..a4d8b12be12c 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1785,7 +1785,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index bdcc53e15e75..762e1a7125f5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -32,7 +32,7 @@ static int switch_to_context(struct drm_i915_private *i915, i915_request_add(rq); } - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return err; } @@ -76,7 +76,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static int pm_prepare(struct drm_i915_private *i915) @@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915) i915_gem_suspend_gtt_mappings(i915); i915_gem_suspend_late(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static void pm_hibernate(struct drm_i915_private *i915) @@ -110,7 +110,7 @@ static void pm_hibernate(struct drm_i915_private *i915) i915_gem_freeze(i915); i915_gem_freeze_late(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static void pm_resume(struct drm_i915_private *i915) @@ -125,7 +125,7 @@ static void pm_resume(struct drm_i915_private *i915) i915_gem_sanitize(i915); i915_gem_resume(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index f7392c1ffe75..eea4fc2445ae 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -376,7 +376,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index d00cdf3c2939..6e1a0711d201 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -243,7 +243,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -609,7 +609,7 @@ static int igt_ctx_exec(void *arg) intel_runtime_pm_get(i915); err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -715,7 +715,7 @@ static int igt_ctx_readonly(void *arg) intel_runtime_pm_get(i915); err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -1067,7 +1067,7 @@ static int igt_vm_isolation(void *arg) count, RUNTIME_INFO(i915)->num_rings); out_rpm: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); out_unlock: if (end_live_test(&t)) err = -EIO; @@ -1200,7 +1200,7 @@ out_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 4365979d8222..8d22f73a9b63 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -464,7 +464,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a9ed0ecc94e2..87cb0602a5fc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -295,7 +295,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, intel_runtime_pm_get(i915); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } count = n; @@ -1216,7 +1216,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index be7ecb66ad11..b03890c590d7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -444,7 +444,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: @@ -508,7 +508,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) if (!i915->gt.active_requests++) { intel_runtime_pm_get(i915); i915_gem_unpark(i915); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); } mutex_unlock(&i915->drm.struct_mutex); 
cancel_delayed_work_sync(&i915->gt.retire_work); @@ -590,7 +590,7 @@ static int igt_mmap_offset_exhaustion(void *arg) mutex_lock(&i915->drm.struct_mutex); intel_runtime_pm_get(i915); err = make_obj_busy(obj); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 07e557815308..e8880cabd5c7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -403,7 +403,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -553,7 +553,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -731,7 +731,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -860,7 +860,7 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 32cba4cae31a..3590ba3d8897 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -225,7 +225,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -337,7 +337,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(dev_priv); + intel_runtime_pm_put_unchecked(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 797cf5e6d6d4..58cba8188bd2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -402,7 +402,7 @@ static int igt_wedged_reset(void *arg) i915_reset(i915, ALL_ENGINES, NULL); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); @@ -1636,7 +1636,7 @@ out: force_reset(i915); unlock: - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); @@ -1679,7 +1679,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 00caaa00f02f..ac1b18a17f3c 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -65,7 +65,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); 
mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -158,7 +158,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -251,7 +251,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -374,7 +374,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -627,7 +627,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915); + intel_runtime_pm_put_unchecked(smoke.i915); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 8b3f3200a3bd..b1b39c70c702 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -94,7 +94,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) intel_runtime_pm_get(engine->i915); rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915); + intel_runtime_pm_put_unchecked(engine->i915); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -241,7 +241,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine, else rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915); + intel_runtime_pm_put_unchecked(engine->i915); kernel_context_close(ctx); @@ -300,7 +300,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, intel_runtime_pm_get(i915); err = reset(engine); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); if (want_spin) { igt_spinner_end(&spin); @@ -414,7 +414,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg) out: reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); igt_global_reset_unlock(i915); return ok ? 0 : -ESRCH; @@ -496,7 +496,7 @@ live_engine_reset_gt_engine_workarounds(void *arg) err: reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915); + intel_runtime_pm_put_unchecked(i915); igt_global_reset_unlock(i915); kernel_context_close(ctx); -- cgit v1.2.3 From 506d1f62454b27535591cdc20e4148a736d0da66 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:11 +0000 Subject: drm/i915: Track GT wakeref Record the wakeref used for keeping the device awake as the GPU is executing requests and be sure to cancel the tracking upon parking. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9c909c43759..f33dc8a1fd1b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1979,7 +1979,7 @@ struct drm_i915_private { * In order to reduce the effect on performance, there * is a slight delay before we do so. 
*/ - bool awake; + intel_wakeref_t awake; /** * The number of times we have woken up. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 640e6361dda3..abd5d83fb0e5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -138,6 +138,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static u32 __i915_gem_park(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; + GEM_TRACE("\n"); lockdep_assert_held(&i915->drm.struct_mutex); @@ -168,14 +170,15 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) i915_pmu_gt_parked(i915); i915_vma_parked(i915); - i915->gt.awake = false; + wakeref = fetch_and_zero(&i915->gt.awake); + GEM_BUG_ON(!wakeref); if (INTEL_GEN(i915) >= 6) gen6_rps_idle(i915); intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return i915->gt.epoch; } @@ -204,7 +207,8 @@ void i915_gem_unpark(struct drm_i915_private *i915) if (i915->gt.awake) return; - intel_runtime_pm_get_noresume(i915); + i915->gt.awake = intel_runtime_pm_get_noresume(i915); + GEM_BUG_ON(!i915->gt.awake); /* * It seems that the DMC likes to transition between the DC states a lot @@ -219,7 +223,6 @@ void i915_gem_unpark(struct drm_i915_private *i915) */ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); - i915->gt.awake = true; if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ i915->gt.epoch = 1; -- cgit v1.2.3 From 183e260ba2467be4c790781af0eb8a834bda65a7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:12 +0000 Subject: drm/i915: Track the rpm wakerefs for error handling Keep hold of the local wakeref used in error handling, to cancel the tracking upon release so that leaks can be identified. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 787a9ed1ef7d..94187e68d39a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3291,6 +3291,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, const char *fmt, ...) { struct intel_engine_cs *engine; + intel_wakeref_t wakeref; unsigned int tmp; char error_msg[80]; char *msg = NULL; @@ -3312,7 +3313,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM reference. */ - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); engine_mask &= INTEL_INFO(dev_priv)->ring_mask; @@ -3374,7 +3375,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, wake_up_all(&dev_priv->gpu_error.reset_queue); out: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } /* Called from drm generic code, passed 'crtc' which -- cgit v1.2.3 From 48d1c812160712429c8eacbe7ca7d72c2d06ae26 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:13 +0000 Subject: drm/i915: Mark up sysfs with rpm wakeref tracking As sysfs has a simple pattern of taking a rpm wakeref around the user access, we can track the local reference and drop it as soon as possible. 
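For illustration, a sysfs callback following this pattern looks roughly like the sketch below; the attribute name and the register read are placeholders and not part of this patch, but the wakeref handling mirrors the conversions in the diff that follows:

/* Illustrative sketch only; attribute and register are placeholders. */
static ssize_t example_show(struct device *kdev,
			    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	intel_wakeref_t wakeref;
	u32 val;

	/* Take a local wakeref only for the duration of the hardware access. */
	wakeref = intel_runtime_pm_get(dev_priv);

	val = I915_READ(RING_HEAD(RENDER_RING_BASE)); /* placeholder register read */

	/* Hand the cookie back so the debug tracking can pair the get/put. */
	intel_runtime_pm_put(dev_priv, wakeref);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

The diff below applies exactly this conversion to calc_residency() and the gt frequency attributes.
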
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_sysfs.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 53c20e103d56..2cbbf165d179 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -42,11 +42,12 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) static u32 calc_residency(struct drm_i915_private *dev_priv, i915_reg_t reg) { + intel_wakeref_t wakeref; u64 res; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); res = intel_rc6_residency_us(dev_priv, reg); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return DIV_ROUND_CLOSEST_ULL(res, 1000); } @@ -258,9 +259,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + intel_wakeref_t wakeref; int ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -274,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, } mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return snprintf(buf, PAGE_SIZE, "%d\n", ret); } @@ -354,6 +356,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; u32 val; ssize_t ret; @@ -361,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); @@ -371,7 +374,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val > rps->max_freq || val < rps->min_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return -EINVAL; } @@ -392,7 +395,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret ?: count; } @@ -412,6 +415,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; u32 val; ssize_t ret; @@ -419,7 +423,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->pcu_lock); @@ -429,7 +433,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val > rps->max_freq || val > rps->max_freq_softlimit) { mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return -EINVAL; } @@ -446,7 +450,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->pcu_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret ?: count; } -- cgit v1.2.3 From 
a037121c3c7fb7e3d88f9a27d3d77581404f9b1d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:14 +0000 Subject: drm/i915: Mark up debugfs with rpm wakeref tracking As debugfs has a simple pattern of taking a rpm wakeref around the user access, we can track the local reference and drop it as soon as possible. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 135 ++++++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6818079669a7..66c520ba0df8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -674,9 +674,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int i, pipe; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (IS_CHERRYVIEW(dev_priv)) { seq_printf(m, "Master Interrupt Control:\t%08x\n", @@ -877,7 +878,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } } - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -950,10 +951,11 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; struct i915_gpu_state *gpu; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); gpu = i915_capture_gpu_state(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -1012,9 +1014,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt_pm.rps; + intel_wakeref_t wakeref; int ret = 0; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (IS_GEN(dev_priv, 5)) { u16 rgvswctl = I915_READ16(MEMSWCTL); @@ -1226,7 +1229,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -1265,6 +1268,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) u64 acthd[I915_NUM_ENGINES]; u32 seqno[I915_NUM_ENGINES]; struct intel_instdone instdone; + intel_wakeref_t wakeref; enum intel_engine_id id; if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) @@ -1283,7 +1287,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); for_each_engine(engine, dev_priv, id) { acthd[id] = intel_engine_get_active_head(engine); @@ -1292,7 +1296,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", @@ -1568,9 +1572,10 @@ static int 
gen6_drpc_info(struct seq_file *m) static int i915_drpc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; int err; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) err = vlv_drpc_info(m); @@ -1579,7 +1584,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) else err = ironlake_drpc_info(m); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return err; } @@ -1601,11 +1606,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_fbc *fbc = &dev_priv->fbc; + intel_wakeref_t wakeref; if (!HAS_FBC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&fbc->lock); if (intel_fbc_is_active(dev_priv)) @@ -1632,7 +1638,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -1677,11 +1683,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; if (!HAS_IPS(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "Enabled by kernel parameter: %s\n", yesno(i915_modparams.enable_ips)); @@ -1695,7 +1702,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -1703,9 +1710,10 @@ static int i915_ips_status(struct seq_file *m, void *unused) static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; bool sr_enabled = false; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); if (INTEL_GEN(dev_priv) >= 9) @@ -1723,7 +1731,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); @@ -1735,29 +1743,30 @@ static int i915_emon_status(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; unsigned long temp, chipset, gfx; + intel_wakeref_t wakeref; int ret; if (!IS_GEN(dev_priv, 5)) return -ENODEV; - intel_runtime_pm_get(dev_priv); - ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; + wakeref = intel_runtime_pm_get(dev_priv); + temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); gfx = i915_gfx_val(dev_priv); mutex_unlock(&dev->struct_mutex); + intel_runtime_pm_put(dev_priv, wakeref); + seq_printf(m, "GMCH temp: %ld\n", temp); seq_printf(m, "Chipset power: %ld\n", chipset); seq_printf(m, "GFX power: %ld\n", gfx); seq_printf(m, "Total power: %ld\n", chipset + gfx); - intel_runtime_pm_put_unchecked(dev_priv); - return 0; } @@ -1766,13 +1775,14 @@ static int i915_ring_freq_table(struct seq_file *m, void 
*unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt_pm.rps; unsigned int max_gpu_freq, min_gpu_freq; + intel_wakeref_t wakeref; int gpu_freq, ia_freq; int ret; if (!HAS_LLC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) @@ -1805,7 +1815,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) mutex_unlock(&dev_priv->pcu_lock); out: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -1978,8 +1988,9 @@ static const char *swizzle_string(unsigned swizzle) static int i915_swizzle_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); @@ -2017,7 +2028,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -2054,9 +2065,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) struct drm_device *dev = &dev_priv->drm; struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 act_freq = rps->cur_freq; + intel_wakeref_t wakeref; struct drm_file *file; - if (intel_runtime_pm_get_if_in_use(dev_priv)) { + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); act_freq = vlv_punit_read(dev_priv, @@ -2067,7 +2080,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) act_freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } seq_printf(m, "RPS enabled? 
%d\n", rps->enabled); @@ -2150,6 +2163,7 @@ static int i915_llc(struct seq_file *m, void *data) static int i915_huc_load_status_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct drm_printer p; if (!HAS_HUC(dev_priv)) @@ -2158,9 +2172,9 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->huc.fw, &p); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -2168,6 +2182,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) static int i915_guc_load_status_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct drm_printer p; u32 tmp, i; @@ -2177,7 +2192,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->guc.fw, &p); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); tmp = I915_READ(GUC_STATUS); @@ -2192,7 +2207,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) for (i = 0; i < 16; i++) seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -2550,6 +2565,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; u32 psrperf = 0; bool enabled = false; bool sink_support; @@ -2562,7 +2578,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) if (!sink_support) return 0; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->psr.lock); seq_printf(m, "PSR mode: %s\n", @@ -2601,7 +2617,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) dev_priv->psr.last_exit); } - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -2610,6 +2626,7 @@ i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; struct drm_modeset_acquire_ctx ctx; + intel_wakeref_t wakeref; int ret; if (!CAN_PSR(dev_priv)) @@ -2617,7 +2634,7 @@ i915_edp_psr_debug_set(void *data, u64 val) DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); @@ -2632,7 +2649,7 @@ retry: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } @@ -2657,15 +2674,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); unsigned long long power; + intel_wakeref_t wakeref; u32 units; if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return -ENODEV; } @@ -2673,7 +2691,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) power = 
I915_READ(MCH_SECP_NRG_STTS); power = (1000000 * power) >> units; /* convert to uJ */ - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); seq_printf(m, "%llu", power); @@ -2742,6 +2760,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) static int i915_dmc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; struct intel_csr *csr; if (!HAS_CSR(dev_priv)) @@ -2749,7 +2768,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) csr = &dev_priv->csr; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); seq_printf(m, "path: %s\n", csr->fw_path); @@ -2775,7 +2794,7 @@ out: seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -3065,8 +3084,10 @@ static int i915_display_info(struct seq_file *m, void *unused) struct intel_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(dev_priv); - intel_runtime_pm_get(dev_priv); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); for_each_intel_crtc(dev, crtc) { @@ -3114,7 +3135,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -3123,10 +3144,11 @@ static int i915_engine_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; + intel_wakeref_t wakeref; enum intel_engine_id id; struct drm_printer p; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); seq_printf(m, "GT awake? 
%s (epoch %u)\n", yesno(dev_priv->gt.awake), dev_priv->gt.epoch); @@ -3139,7 +3161,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -3252,6 +3274,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; + intel_wakeref_t wakeref; int ret; bool enable; @@ -3259,13 +3282,15 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, if (ret < 0) return ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); + if (!dev_priv->ipc_enabled && enable) DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); dev_priv->wm.distrust_bios_wm = true; dev_priv->ipc_enabled = enable; intel_enable_ipc(dev_priv); - intel_runtime_pm_put_unchecked(dev_priv); + + intel_runtime_pm_put(dev_priv, wakeref); return len; } @@ -4031,11 +4056,12 @@ static int i915_drop_caches_set(void *data, u64 val) { struct drm_i915_private *i915 = data; + intel_wakeref_t wakeref; int ret = 0; DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) i915_gem_set_wedged(i915); @@ -4090,7 +4116,7 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_drain_freed_objects(i915); out: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return ret; } @@ -4103,16 +4129,17 @@ static int i915_cache_sharing_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; + intel_wakeref_t wakeref; u32 snpcr; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -4123,6 +4150,7 @@ static int i915_cache_sharing_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; + intel_wakeref_t wakeref; u32 snpcr; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) @@ -4131,7 +4159,7 @@ i915_cache_sharing_set(void *data, u64 val) if (val > 3) return -EINVAL; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); /* Update the cache sharing policy here as well */ @@ -4140,7 +4168,7 @@ i915_cache_sharing_set(void *data, u64 val) snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -4362,6 +4390,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct sseu_dev_info sseu; + intel_wakeref_t wakeref; if (INTEL_GEN(dev_priv) < 8) return -ENODEV; @@ -4376,7 +4405,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) sseu.max_eus_per_subslice = RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (IS_CHERRYVIEW(dev_priv)) { cherryview_sseu_device_status(dev_priv, &sseu); @@ -4388,7 +4417,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) 
gen10_sseu_device_status(dev_priv, &sseu); } - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); i915_print_sseu_info(m, false, &sseu); -- cgit v1.2.3 From 6619c0075f784d7720fc9810279c956d51b22aaf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:15 +0000 Subject: drm/i915/perf: Track the rpm wakeref Keep track of our wakeref used to keep the device awake so we can catch any leak. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-7-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_perf.c | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f33dc8a1fd1b..b6d0cd890a19 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1333,6 +1333,8 @@ struct i915_perf_stream { */ struct list_head link; + intel_wakeref_t wakeref; + /** * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` * properties given when opening a stream, representing the contents diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e4dfd1477c78..faff6cf1aaa1 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -2087,7 +2087,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. */ - intel_runtime_pm_get(dev_priv); + stream->wakeref = intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); @@ -2123,7 +2123,7 @@ err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, stream->wakeref); err_config: if (stream->ctx) -- cgit v1.2.3 From 00e27cbe4c8960b79324ccd48a6077e251e42bd7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:16 +0000 Subject: drm/i915/pmu: Track rpm wakeref Track the wakeref used for temporary access to the device, and discard it upon release so that leaks can be identified. 
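The sampling paths only take the wakeref opportunistically: intel_runtime_pm_get_if_in_use() now returns an intel_wakeref_t that is zero when the device is asleep, so callers test the cookie rather than a bool and skip the hardware read. A rough sketch of that shape (sample_freq() is an invented helper; only the wakeref handling mirrors the real sampler):

  static void sample_freq(struct drm_i915_private *i915, u32 *val)
  {
          intel_wakeref_t wakeref;

          /* 0 means the device is asleep; keep the previous sample */
          wakeref = intel_runtime_pm_get_if_in_use(i915);
          if (!wakeref)
                  return;

          *val = I915_READ_NOTRACE(GEN6_RPSTAT1);
          intel_runtime_pm_put(i915, wakeref);
  }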
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-8-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pmu.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index c99fcfce79d5..3d43fc9dd25d 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -167,6 +167,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; bool fw = false; if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) @@ -175,7 +176,8 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) if (!dev_priv->gt.awake) return; - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return; for_each_engine(engine, dev_priv, id) { @@ -210,7 +212,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) if (fw) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } static void @@ -227,11 +229,15 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) u32 val; val = dev_priv->gt_pm.rps.cur_freq; - if (dev_priv->gt.awake && - intel_runtime_pm_get_if_in_use(dev_priv)) { - val = intel_get_cagf(dev_priv, - I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put_unchecked(dev_priv); + if (dev_priv->gt.awake) { + intel_wakeref_t wakeref = + intel_runtime_pm_get_if_in_use(dev_priv); + + if (wakeref) { + val = intel_get_cagf(dev_priv, + I915_READ_NOTRACE(GEN6_RPSTAT1)); + intel_runtime_pm_put(dev_priv, wakeref); + } } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], @@ -443,12 +449,14 @@ static u64 __get_rc6(struct drm_i915_private *i915) static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) + intel_wakeref_t wakeref; unsigned long flags; u64 val; - if (intel_runtime_pm_get_if_in_use(i915)) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (wakeref) { val = __get_rc6(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); /* * If we are coming back from being runtime suspended we must -- cgit v1.2.3 From 3055f0cd782fe3ef556227fc153c2f66fe47b721 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:17 +0000 Subject: drm/i915/guc: Track the rpm wakeref Keep track of our acquired wakeref for interacting with the guc, so that we can cancel it upon release and so clearly identify leaks. 
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-9-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_guc_log.c | 15 +++++++++------ drivers/gpu/drm/i915/intel_huc.c | 5 +++-- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 1b1581a42aa1..20c0b36d748e 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -436,6 +436,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); + intel_wakeref_t wakeref; guc_read_update_log_buffer(log); @@ -443,9 +444,9 @@ static void guc_log_capture_logs(struct intel_guc_log *log) * Generally device is expected to be active only at this * time, so get/put should be really quick. */ - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc_action_flush_log_complete(guc); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } int intel_guc_log_create(struct intel_guc_log *log) @@ -505,6 +506,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); + intel_wakeref_t wakeref; int ret; BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); @@ -524,11 +526,11 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) goto out_unlock; } - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), GUC_LOG_LEVEL_IS_ENABLED(level), GUC_LOG_LEVEL_TO_VERBOSITY(level)); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; @@ -601,6 +603,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); + intel_wakeref_t wakeref; /* * Before initiating the forceful flush, wait for any pending/ongoing @@ -608,9 +611,9 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) */ flush_work(&log->relay.flush_work); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); guc_action_flush_log(guc); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index c2b076e9bada..3e8c18b6a42d 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -115,14 +115,15 @@ fail: int intel_huc_check_status(struct intel_huc *huc) { struct drm_i915_private *dev_priv = huc_to_i915(huc); + intel_wakeref_t wakeref; bool status; if (!HAS_HUC(dev_priv)) return -ENODEV; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return status; } -- cgit v1.2.3 From 538ef96b9dae7fe0c021e1c0bfc86f370d3b1488 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:18 +0000 Subject: drm/i915/gem: Track the rpm wakerefs Keep track of the temporary rpm wakerefs used for user access to the 
device, so that we can cancel them upon release and clearly identify any leaks. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-10-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 47 ++++++++++++++++++------------ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5 ++-- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 6 ++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 22 +++++++++----- drivers/gpu/drm/i915/i915_gem_shrinker.c | 32 ++++++++++++-------- drivers/gpu/drm/i915/intel_engine_cs.c | 12 +++++--- drivers/gpu/drm/i915/intel_uncore.c | 5 ++-- 7 files changed, 81 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abd5d83fb0e5..3186859ff378 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -785,6 +785,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) { + intel_wakeref_t wakeref; + /* * No actual flushing is required for the GTT write domain for reads * from the GTT domain. Writes to it "immediately" go to main memory @@ -811,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->uncore.lock); POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } static void @@ -1069,6 +1071,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + intel_wakeref_t wakeref; struct drm_mm_node node; struct i915_vma *vma; void __user *user_data; @@ -1079,7 +1082,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONFAULT | @@ -1152,7 +1155,7 @@ out_unpin: i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1253,6 +1256,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + intel_wakeref_t wakeref; struct drm_mm_node node; struct i915_vma *vma; u64 remain, offset; @@ -1271,13 +1275,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * This easily dwarfs any performance advantage from * using the cache bypass of indirect GGTT access. 
*/ - if (!intel_runtime_pm_get_if_in_use(i915)) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (!wakeref) { ret = -EFAULT; goto out_unlock; } } else { /* No backing pages, no fallback, we must force GGTT access */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -1359,7 +1364,7 @@ out_unpin: i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1864,6 +1869,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool write = area->vm_flags & VM_WRITE; + intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; int ret; @@ -1893,7 +1899,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -1971,7 +1977,7 @@ err_unpin: err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -2044,6 +2050,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; /* Serialisation between user GTT access and our code depends upon * revoking the CPU's PTE whilst the mutex is held. The next user @@ -2054,7 +2061,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) * wakeref. */ lockdep_assert_held(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (!obj->userfault_count) goto out; @@ -2071,7 +2078,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) @@ -4706,8 +4713,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { struct drm_i915_gem_object *obj, *on; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); llist_for_each_entry_safe(obj, on, freed, freed) { struct i915_vma *vma, *vn; @@ -4768,7 +4776,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (on) cond_resched(); } - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static void i915_gem_flush_free_objects(struct drm_i915_private *i915) @@ -4877,11 +4885,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) void i915_gem_sanitize(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; + GEM_TRACE("\n"); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); /* @@ -4904,7 +4914,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_engines_sanitize(i915, false); intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); @@ -4912,11 +4922,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915) int i915_gem_suspend(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; int ret; GEM_TRACE("\n"); - intel_runtime_pm_get(i915); + wakeref = 
intel_runtime_pm_get(i915); intel_suspend_gt_powersave(i915); mutex_lock(&i915->drm.struct_mutex); @@ -4968,12 +4979,12 @@ int i915_gem_suspend(struct drm_i915_private *i915) if (WARN_ON(!intel_engines_are_idle(i915))) i915_gem_set_wedged(i915); /* no hope, discard everything */ - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return 0; err_unlock: mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index c80943698ca2..f250109e1f66 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -2202,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; + intel_wakeref_t wakeref; int out_fence_fd = -1; int err; @@ -2272,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, * wakeref that we hold until the GPU has been idle for at least * 100ms. */ - intel_runtime_pm_get(eb.i915); + wakeref = intel_runtime_pm_get(eb.i915); err = i915_mutex_lock_interruptible(dev); if (err) @@ -2424,7 +2425,7 @@ err_vma: eb_release_vmas(&eb); mutex_unlock(&dev->struct_mutex); err_rpm: - intel_runtime_pm_put_unchecked(eb.i915); + intel_runtime_pm_put(eb.i915, wakeref); i915_gem_context_put(eb.ctx); err_destroy: eb_destroy(&eb); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index b3391070acf7..f7947d89cf45 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -209,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence, static int fence_update(struct drm_i915_fence_reg *fence, struct i915_vma *vma) { + intel_wakeref_t wakeref; int ret; if (vma) { @@ -256,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence, * If the device is currently powered down, we will defer the write * to the runtime resume, see i915_gem_restore_fences(). 
*/ - if (intel_runtime_pm_get_if_in_use(fence->i915)) { + wakeref = intel_runtime_pm_get_if_in_use(fence->i915); + if (wakeref) { fence_write(fence, vma); - intel_runtime_pm_put_unchecked(fence->i915); + intel_runtime_pm_put(fence->i915, wakeref); } if (vma) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 51f80ddd938d..e2c61633e95d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2527,6 +2527,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, { struct drm_i915_private *i915 = vma->vm->i915; struct drm_i915_gem_object *obj = vma->obj; + intel_wakeref_t wakeref; u32 pte_flags; /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ @@ -2534,9 +2535,9 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2553,10 +2554,11 @@ static int ggtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static int aliasing_gtt_bind_vma(struct i915_vma *vma, @@ -2588,9 +2590,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, } if (flags & I915_VMA_GLOBAL_BIND) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } return 0; @@ -2601,9 +2605,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; if (vma->flags & I915_VMA_GLOBAL_BIND) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } if (vma->flags & I915_VMA_LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 2bef02d0883d..f01489b05b5e 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -153,6 +153,7 @@ i915_gem_shrink(struct drm_i915_private *i915, { &i915->mm.bound_list, I915_SHRINK_BOUND }, { NULL, 0 }, }, *phase; + intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; bool unlock; @@ -182,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915, * device just to recover a little memory. If absolutely necessary, * we will force the wake during oom-notifier. 
*/ - if ((flags & I915_SHRINK_BOUND) && - !intel_runtime_pm_get_if_in_use(i915)) - flags &= ~I915_SHRINK_BOUND; + if (flags & I915_SHRINK_BOUND) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (!wakeref) + flags &= ~I915_SHRINK_BOUND; + } /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -265,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (flags & I915_SHRINK_BOUND) - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); i915_retire_requests(i915); @@ -292,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915, */ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; unsigned long freed; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); freed = i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return freed; } @@ -370,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); freed += i915_gem_shrink(i915, sc->nr_to_scan - sc->nr_scanned, &sc->nr_scanned, I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } shrinker_unlock(i915, unlock); @@ -392,12 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) container_of(nb, struct drm_i915_private, mm.oom_notifier); struct drm_i915_gem_object *obj; unsigned long unevictable, bound, unbound, freed_pages; + intel_wakeref_t wakeref; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); freed_pages = i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -435,6 +442,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct i915_vma *vma, *next; unsigned long freed_pages = 0; + intel_wakeref_t wakeref; bool unlock; if (!shrinker_lock(i915, 0, &unlock)) @@ -446,12 +454,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr MAX_SCHEDULE_TIMEOUT)) goto out; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); freed_pages += i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_VMAPS); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); /* We also want to clear any cached iomaps as they wrap vmap */ list_for_each_entry_safe(vma, next, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2e60463f2468..45e33eee76f9 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -913,10 +913,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, static bool ring_is_idle(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + intel_wakeref_t wakeref; bool idle = true; /* If the whole device is asleep, the engine must be idle */ - if 
(!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return true; /* First check that no commands are left in the ring */ @@ -928,7 +930,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) idle = false; - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return idle; } @@ -1425,6 +1427,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, const struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq, *last; + intel_wakeref_t wakeref; unsigned long flags; struct rb_node *rb; int count; @@ -1483,9 +1486,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, rcu_read_unlock(); - if (intel_runtime_pm_get_if_in_use(engine->i915)) { + wakeref = intel_runtime_pm_get_if_in_use(engine->i915); + if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put_unchecked(engine->i915); + intel_runtime_pm_put(engine->i915, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8d4c76ac0e7d..d494d92da02c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; struct reg_whitelist const *entry; + intel_wakeref_t wakeref; unsigned int flags; int remain; int ret = 0; @@ -1695,7 +1696,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); if (entry->size == 8 && flags == I915_REG_READ_8B_WA) reg->val = I915_READ64_2x32(entry->offset_ldw, entry->offset_udw); @@ -1709,7 +1710,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, reg->val = I915_READ8(entry->offset_ldw); else ret = -EINVAL; - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } -- cgit v1.2.3 From 1d264d91befc3131e68edc225fa49acb2f315f73 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:19 +0000 Subject: drm/i915/fb: Track rpm wakerefs Keep track of the rpm wakeref used for framebuffer access so that we can cancel upon release and so more clearly identify leaks. 
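Because the pin can fail at several points, the wakeref is kept in a local and the very same cookie is released on both the success and error exits. A simplified sketch under the assumption of a hypothetical pin_fb() helper (only the wakeref handling reflects the patch):

  static struct i915_vma *pin_example(struct drm_i915_private *dev_priv,
                                      struct drm_framebuffer *fb)
  {
          intel_wakeref_t wakeref;
          struct i915_vma *vma;

          wakeref = intel_runtime_pm_get(dev_priv);

          vma = pin_fb(fb); /* hypothetical helper that may return ERR_PTR */

          /* success or failure, drop the reference with the cookie we took */
          intel_runtime_pm_put(dev_priv, wakeref);
          return vma;
  }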
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-11-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 5 +++-- drivers/gpu/drm/i915/intel_fbdev.c | 9 +++++---- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a980d5d1e601..b0b8f9ffd873 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2023,6 +2023,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); + intel_wakeref_t wakeref; struct i915_vma *vma; unsigned int pinctl; u32 alignment; @@ -2046,7 +2047,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. */ - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2101,7 +2102,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return vma; } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 215e5894842d..3036d835bc2b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -177,8 +177,9 @@ static int intelfb_create(struct drm_fb_helper *helper, const struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL, }; - struct fb_info *info; struct drm_framebuffer *fb; + intel_wakeref_t wakeref; + struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; @@ -209,7 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper, } mutex_lock(&dev->struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the @@ -276,7 +277,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -284,7 +285,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev->struct_mutex); return ret; } -- cgit v1.2.3 From 6a712a20bff467e82c3507c4286f04499af1c52d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:20 +0000 Subject: drm/i915/hotplug: Track temporary rpm wakeref Keep track of the temporary rpm wakeref inside hotplug detection, so that we can cancel it immediately upon release and so clearly identify leaks. 
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-12-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_hotplug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b1a9cb960ca4..e027d2b4abe5 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -226,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; + intel_wakeref_t wakeref; enum hpd_pin pin; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->irq_lock); for_each_hpd_pin(pin) { @@ -261,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); } bool intel_encoder_hotplug(struct intel_encoder *encoder, -- cgit v1.2.3 From 2cb2cb5ff41abd92d6a7bfb1459b25974fa6d509 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:21 +0000 Subject: drm/i915/panel: Track temporary rpm wakeref Keep track of the temporary rpm wakeref used for panel backlight access, so that we can cancel it immediately upon release and so more clearly identify leaks. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-13-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_panel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c2b7455a023e..93a2e4b5c54c 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1203,17 +1203,18 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) struct intel_connector *connector = bl_get_data(bd); struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + intel_wakeref_t wakeref; u32 hw_level; int ret; - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); hw_level = intel_panel_get_backlight(connector); ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); drm_modeset_unlock(&dev->mode_config.connection_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); return ret; } -- cgit v1.2.3 From c9d08cc3e3393e19162cb2cfaa1f454baf2aaffe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:22 +0000 Subject: drm/i915/selftests: Mark up rpm wakerefs Track the temporary wakerefs used within the selftests so that leaks are clear. v2: Add a couple of coarse annotations for mock selftests as we now loudly warn about the errors. 
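In the selftests the annotation is coarse by design: each live test takes one tracked wakeref up front and drops it on its unlock path, so a wakeref leaked by the code under test now warns loudly instead of silently unbalancing the old counter. A sketch of that scaffolding (igt_example() and its body are invented for illustration):

  static int igt_example(void *arg)
  {
          struct drm_i915_private *i915 = arg;
          intel_wakeref_t wakeref;
          int err = 0;

          mutex_lock(&i915->drm.struct_mutex);
          wakeref = intel_runtime_pm_get(i915);

          /* ... exercise the GPU under the tracked wakeref ... */

          intel_runtime_pm_put(i915, wakeref);
          mutex_unlock(&i915->drm.struct_mutex);
          return err;
  }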
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-14-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/huge_pages.c | 5 ++-- drivers/gpu/drm/i915/selftests/i915_gem.c | 29 ++++++++++++++-------- .../gpu/drm/i915/selftests/i915_gem_coherency.c | 5 ++-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 27 ++++++++++++-------- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 16 +++++++++--- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10 +++++--- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 18 +++++++++----- drivers/gpu/drm/i915/selftests/i915_request.c | 27 +++++++++++++------- drivers/gpu/drm/i915/selftests/intel_guc.c | 10 +++++--- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 15 ++++++----- drivers/gpu/drm/i915/selftests/intel_lrc.c | 25 +++++++++++-------- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 29 ++++++++++++++-------- 12 files changed, 138 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index a4d8b12be12c..a52450111802 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1756,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) }; struct drm_file *file; struct i915_gem_context *ctx; + intel_wakeref_t wakeref; int err; if (!HAS_PPGTT(dev_priv)) { @@ -1771,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1785,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 762e1a7125f5..01a46c46fe25 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915, { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *rq; @@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915, i915_request_add(rq); } - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return err; } @@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915) static void simulate_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); /* * As a final sting in the tail, invalidate stolen. 
Under a real S4, @@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static int pm_prepare(struct drm_i915_private *i915) @@ -93,39 +96,45 @@ static int pm_prepare(struct drm_i915_private *i915) static void pm_suspend(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); i915_gem_suspend_gtt_mappings(i915); i915_gem_suspend_late(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static void pm_hibernate(struct drm_i915_private *i915) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); i915_gem_suspend_gtt_mappings(i915); i915_gem_freeze(i915); i915_gem_freeze_late(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static void pm_resume(struct drm_i915_private *i915) { + intel_wakeref_t wakeref; + /* * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); intel_engines_sanitize(i915, false); i915_gem_sanitize(i915); i915_gem_resume(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index eea4fc2445ae..fd89a5a33c1a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg) struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; unsigned long count, n; u32 *offsets, *values; int err = 0; @@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 6e1a0711d201..7a9b1f20b019 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -119,6 +119,7 @@ static int live_nop_switch(void *arg) struct intel_engine_cs *engine; struct i915_gem_context **ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; struct drm_file *file; struct live_test t; unsigned long n; @@ -140,7 +141,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -243,7 +244,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -593,6 +594,8 @@ static int igt_ctx_exec(void *arg) } for_each_engine(engine, i915, id) { + intel_wakeref_t 
wakeref; + if (!engine->context_size) continue; /* No logical context support in HW */ @@ -607,9 +610,9 @@ static int igt_ctx_exec(void *arg) } } - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -699,6 +702,8 @@ static int igt_ctx_readonly(void *arg) unsigned int id; for_each_engine(engine, i915, id) { + intel_wakeref_t wakeref; + if (!intel_engine_can_store_dword(engine)) continue; @@ -713,9 +718,9 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -976,6 +981,7 @@ static int igt_vm_isolation(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; + intel_wakeref_t wakeref; struct drm_file *file; I915_RND_STATE(prng); unsigned long count; @@ -1022,7 +1028,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); count = 0; for_each_engine(engine, i915, id) { @@ -1067,7 +1073,7 @@ static int igt_vm_isolation(void *arg) count, RUNTIME_INFO(i915)->num_rings); out_rpm: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); out_unlock: if (end_live_test(&t)) err = -EIO; @@ -1165,6 +1171,7 @@ static int igt_switch_to_kernel_context(void *arg) struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; + intel_wakeref_t wakeref; int err; /* @@ -1175,7 +1182,7 @@ static int igt_switch_to_kernel_context(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -1200,7 +1207,7 @@ out_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 8d22f73a9b63..bbcbf11c72b3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -336,6 +336,7 @@ static int igt_evict_contexts(void *arg) struct drm_mm_node node; struct reserved *next; } *reserved = NULL; + intel_wakeref_t wakeref; struct drm_mm_node hole; unsigned long count; int err; @@ -355,7 +356,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -400,8 +401,10 @@ static int igt_evict_contexts(void *arg) struct drm_file *file; file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); + if (IS_ERR(file)) { + err = PTR_ERR(file); + break; + } count = 0; mutex_lock(&i915->drm.struct_mutex); @@ -464,7 +467,7 @@ out_locked: } if 
(drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -480,6 +483,7 @@ int i915_gem_evict_mock_selftests(void) SUBTEST(igt_overcommit), }; struct drm_i915_private *i915; + intel_wakeref_t wakeref; int err; i915 = mock_gem_device(); @@ -487,7 +491,11 @@ int i915_gem_evict_mock_selftests(void) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + err = i915_subtests(tests, i915); + + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 87cb0602a5fc..fea8ab14e79d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, for (n = 0; n < count; n++) { u64 addr = hole_start + order[n] * BIT_ULL(size); + intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); @@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } count = n; @@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg) struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; struct drm_mm_node tmp; unsigned int *order, n; int err; @@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg) if (err) goto out_unpin; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; @@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index b03890c590d7..3575e1387c3f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -308,6 +308,7 @@ static int igt_partial_tiling(void *arg) const unsigned int nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; int tiling; int err; @@ -333,7 +334,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (1) { IGT_TIMEOUT(end); @@ -444,7 +445,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: @@ -506,11 +507,14 @@ static void disable_retire_worker(struct drm_i915_private *i915) mutex_lock(&i915->drm.struct_mutex); if (!i915->gt.active_requests++) { - intel_runtime_pm_get(i915); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); i915_gem_unpark(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); } 
mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); cancel_delayed_work_sync(&i915->gt.idle_work); } @@ -578,6 +582,8 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { + intel_wakeref_t wakeref; + if (i915_terminally_wedged(&i915->gpu_error)) break; @@ -588,9 +594,9 @@ static int igt_mmap_offset_exhaustion(void *arg) } mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = make_obj_busy(obj); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index e8880cabd5c7..9f705ff9423f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -255,13 +255,18 @@ int i915_request_mock_selftests(void) SUBTEST(igt_request_rewind), }; struct drm_i915_private *i915; + intel_wakeref_t wakeref; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + wakeref = intel_runtime_pm_get(i915); + err = i915_subtests(tests, i915); + + intel_runtime_pm_put(i915, wakeref); drm_dev_put(&i915->drm); return err; @@ -332,6 +337,7 @@ static int live_nop_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; + intel_wakeref_t wakeref; struct live_test t; unsigned int id; int err = -ENODEV; @@ -342,7 +348,7 @@ static int live_nop_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *request = NULL; @@ -403,7 +409,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -478,8 +484,9 @@ static int live_empty_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - struct live_test t; + intel_wakeref_t wakeref; struct i915_vma *batch; + struct live_test t; unsigned int id; int err = 0; @@ -489,7 +496,7 @@ static int live_empty_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); batch = empty_batch(i915); if (IS_ERR(batch)) { @@ -553,7 +560,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -637,6 +644,7 @@ static int live_all_engines(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_request *request[I915_NUM_ENGINES]; + intel_wakeref_t wakeref; struct i915_vma *batch; struct live_test t; unsigned int id; @@ -648,7 +656,7 @@ static int live_all_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = begin_live_test(&t, i915, __func__, ""); if (err) @@ -731,7 +739,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -742,6 +750,7 @@ static int live_sequential_engines(void *arg) struct i915_request 
*request[I915_NUM_ENGINES] = {}; struct i915_request *prev = NULL; struct intel_engine_cs *engine; + intel_wakeref_t wakeref; struct live_test t; unsigned int id; int err; @@ -753,7 +762,7 @@ static int live_sequential_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = begin_live_test(&t, i915, __func__, ""); if (err) @@ -860,7 +869,7 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 3590ba3d8897..c5e0a0e98fcb 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client) static int igt_guc_clients(void *args) { struct drm_i915_private *dev_priv = args; + intel_wakeref_t wakeref; struct intel_guc *guc; int err = 0; GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -225,7 +226,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -238,13 +239,14 @@ unlock: static int igt_guc_doorbells(void *arg) { struct drm_i915_private *dev_priv = arg; + intel_wakeref_t wakeref; struct intel_guc *guc; int i, err = 0; u16 db_id; GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); guc = &dev_priv->guc; if (!guc) { @@ -337,7 +339,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put(dev_priv, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 58cba8188bd2..edd53d18077b 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -388,12 +388,13 @@ static int igt_global_reset(void *arg) static int igt_wedged_reset(void *arg) { struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; /* Check that we can recover a wedged device with a GPU reset */ igt_global_reset_lock(i915); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); i915_gem_set_wedged(i915); GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error)); @@ -402,7 +403,7 @@ static int igt_wedged_reset(void *arg) i915_reset(i915, ALL_ENGINES, NULL); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); @@ -1600,6 +1601,7 @@ static int igt_atomic_reset(void *arg) { } }; struct drm_i915_private *i915 = arg; + intel_wakeref_t wakeref; int err = 0; /* Check that the resets are usable from atomic context */ @@ -1609,7 +1611,7 @@ static int igt_atomic_reset(void *arg) igt_global_reset_lock(i915); mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); /* Flush any requests before we get started 
and check basics */ force_reset(i915); @@ -1636,7 +1638,7 @@ out: force_reset(i915); unlock: - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); @@ -1660,6 +1662,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_handle_error), SUBTEST(igt_atomic_reset), }; + intel_wakeref_t wakeref; bool saved_hangcheck; int err; @@ -1669,7 +1672,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; /* we're long past hope of a successful reset */ - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); err = i915_subtests(tests, i915); @@ -1679,7 +1682,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index ac1b18a17f3c..e6073cd4719c 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -18,13 +18,14 @@ static int live_sanitycheck(void *arg) struct i915_gem_context *ctx; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -65,7 +66,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -77,13 +78,14 @@ static int live_preempt(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -158,7 +160,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -171,13 +173,14 @@ static int live_late_preempt(void *arg) struct intel_engine_cs *engine; struct i915_sched_attr attr = {}; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -251,7 +254,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -270,6 +273,7 @@ static int live_preempt_hang(void *arg) struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = -ENOMEM; if (!HAS_LOGICAL_RING_PREEMPTION(i915)) @@ -279,7 +283,7 @@ static int live_preempt_hang(void 
*arg) return 0; mutex_lock(&i915->drm.struct_mutex); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -374,7 +378,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -562,6 +566,7 @@ static int live_preempt_smoke(void *arg) .ncontext = 1024, }; const unsigned int phase[] = { 0, BATCH }; + intel_wakeref_t wakeref; int err = -ENOMEM; u32 *cs; int n; @@ -576,7 +581,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(smoke.i915); smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -627,7 +632,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put_unchecked(smoke.i915); + intel_runtime_pm_put(smoke.i915, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index b1b39c70c702..75324b6249e3 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -60,10 +60,11 @@ reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) static struct drm_i915_gem_object * read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + const u32 base = engine->mmio_base; struct drm_i915_gem_object *result; + intel_wakeref_t wakeref; struct i915_request *rq; struct i915_vma *vma; - const u32 base = engine->mmio_base; u32 srm, *cs; int err; int i; @@ -92,9 +93,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - intel_runtime_pm_get(engine->i915); + wakeref = intel_runtime_pm_get(engine->i915); rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put_unchecked(engine->i915); + intel_runtime_pm_put(engine->i915, wakeref); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -228,20 +229,21 @@ switch_to_scratch_context(struct intel_engine_cs *engine, { struct i915_gem_context *ctx; struct i915_request *rq; + intel_wakeref_t wakeref; int err = 0; ctx = kernel_context(engine->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - intel_runtime_pm_get(engine->i915); + wakeref = intel_runtime_pm_get(engine->i915); if (spin) rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); else rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put_unchecked(engine->i915); + intel_runtime_pm_put(engine->i915, wakeref); kernel_context_close(ctx); @@ -273,6 +275,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, bool want_spin = reset == do_engine_reset; struct i915_gem_context *ctx; struct igt_spinner spin; + intel_wakeref_t wakeref; int err; pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", @@ -298,9 +301,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out; - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); err = reset(engine); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); if (want_spin) { igt_spinner_end(&spin); @@ -391,6 +394,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gpu_error 
*error = &i915->gpu_error; + intel_wakeref_t wakeref; struct wa_lists lists; bool ok; @@ -400,7 +404,8 @@ live_gpu_reset_gt_engine_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); + reference_lists_init(i915, &lists); ok = verify_gt_engine_wa(i915, &lists, "before reset"); @@ -414,7 +419,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg) out: reference_lists_fini(i915, &lists); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); return ok ? 0 : -ESRCH; @@ -429,6 +434,7 @@ live_engine_reset_gt_engine_workarounds(void *arg) struct igt_spinner spin; enum intel_engine_id id; struct i915_request *rq; + intel_wakeref_t wakeref; struct wa_lists lists; int ret = 0; @@ -440,7 +446,8 @@ live_engine_reset_gt_engine_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); - intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(i915); + reference_lists_init(i915, &lists); for_each_engine(engine, i915, id) { @@ -496,7 +503,7 @@ live_engine_reset_gt_engine_workarounds(void *arg) err: reference_lists_fini(i915, &lists); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); -- cgit v1.2.3 From d4225a535b3b086868ce1f82dc0593d85d04dae8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:23 +0000 Subject: drm/i915: Syntatic sugar for using intel_runtime_pm Frequently, we use intel_runtime_pm_get/_put around a small block. Formalise that usage by providing a macro to define such a block with an automatic closure to scope the intel_runtime_pm wakeref to that block, i.e. macro abuse smelling of python. 
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-15-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 162 +++++++++------------ drivers/gpu/drm/i915/i915_gem.c | 10 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 23 ++- drivers/gpu/drm/i915/i915_gem_shrinker.c | 51 ++++--- drivers/gpu/drm/i915/i915_pmu.c | 7 +- drivers/gpu/drm/i915/i915_sysfs.c | 7 +- drivers/gpu/drm/i915/intel_drv.h | 8 + drivers/gpu/drm/i915/intel_guc_log.c | 26 ++-- drivers/gpu/drm/i915/intel_huc.c | 7 +- drivers/gpu/drm/i915/intel_panel.c | 18 ++- drivers/gpu/drm/i915/intel_uncore.c | 30 ++-- drivers/gpu/drm/i915/selftests/i915_gem.c | 34 ++--- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 12 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 8 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 11 +- drivers/gpu/drm/i915/selftests/i915_request.c | 8 +- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 28 ++-- 17 files changed, 209 insertions(+), 241 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 66c520ba0df8..1c7913b40bb7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -953,9 +953,9 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) struct i915_gpu_state *gpu; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - gpu = i915_capture_gpu_state(i915); - intel_runtime_pm_put(i915, wakeref); + gpu = NULL; + with_intel_runtime_pm(i915, wakeref) + gpu = i915_capture_gpu_state(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -1287,17 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - wakeref = intel_runtime_pm_get(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) { + for_each_engine(engine, dev_priv, id) { + acthd[id] = intel_engine_get_active_head(engine); + seqno[id] = intel_engine_get_seqno(engine); + } - for_each_engine(engine, dev_priv, id) { - acthd[id] = intel_engine_get_active_head(engine); - seqno[id] = intel_engine_get_seqno(engine); + intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); } - intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); - - intel_runtime_pm_put(dev_priv, wakeref); - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - @@ -1573,18 +1571,16 @@ static int i915_drpc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; - int err; - - wakeref = intel_runtime_pm_get(dev_priv); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = vlv_drpc_info(m); - else if (INTEL_GEN(dev_priv) >= 6) - err = gen6_drpc_info(m); - else - err = ironlake_drpc_info(m); + int err = -ENODEV; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + err = vlv_drpc_info(m); + else if (INTEL_GEN(dev_priv) >= 6) + err = gen6_drpc_info(m); + else + err = ironlake_drpc_info(m); + } return err; } @@ -2068,8 +2064,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_file *file; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); - if (wakeref) { + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { if (IS_VALLEYVIEW(dev_priv) || 
IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); act_freq = vlv_punit_read(dev_priv, @@ -2080,7 +2075,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) act_freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv, wakeref); } seq_printf(m, "RPS enabled? %d\n", rps->enabled); @@ -2172,9 +2166,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->huc.fw, &p); - wakeref = intel_runtime_pm_get(dev_priv); - seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); return 0; } @@ -2184,7 +2177,6 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; struct drm_printer p; - u32 tmp, i; if (!HAS_GUC(dev_priv)) return -ENODEV; @@ -2192,22 +2184,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->guc.fw, &p); - wakeref = intel_runtime_pm_get(dev_priv); - - tmp = I915_READ(GUC_STATUS); - - seq_printf(m, "\nGuC status 0x%08x:\n", tmp); - seq_printf(m, "\tBootrom status = 0x%x\n", - (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); - seq_printf(m, "\tuKernel status = 0x%x\n", - (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); - seq_printf(m, "\tMIA Core status = 0x%x\n", - (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); - seq_puts(m, "\nScratch registers:\n"); - for (i = 0; i < 16; i++) - seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 tmp = I915_READ(GUC_STATUS); + u32 i; + + seq_printf(m, "\nGuC status 0x%08x:\n", tmp); + seq_printf(m, "\tBootrom status = 0x%x\n", + (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); + seq_printf(m, "\tuKernel status = 0x%x\n", + (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); + seq_printf(m, "\tMIA Core status = 0x%x\n", + (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); + seq_puts(m, "\nScratch registers:\n"); + for (i = 0; i < 16; i++) { + seq_printf(m, "\t%2d: \t0x%x\n", + i, I915_READ(SOFT_SCRATCH(i))); + } + } return 0; } @@ -2680,19 +2673,14 @@ static int i915_energy_uJ(struct seq_file *m, void *data) if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { - intel_runtime_pm_put(dev_priv, wakeref); + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) return -ENODEV; - } units = (power & 0x1f00) >> 8; - power = I915_READ(MCH_SECP_NRG_STTS); - power = (1000000 * power) >> units; /* convert to uJ */ - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + power = I915_READ(MCH_SECP_NRG_STTS); + power = (1000000 * power) >> units; /* convert to uJ */ seq_printf(m, "%llu", power); return 0; @@ -3275,22 +3263,20 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; intel_wakeref_t wakeref; - int ret; bool enable; + int ret; ret = kstrtobool_from_user(ubuf, len, &enable); if (ret < 0) return ret; - wakeref = intel_runtime_pm_get(dev_priv); - - if (!dev_priv->ipc_enabled && enable) - DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->wm.distrust_bios_wm = 
true; - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (!dev_priv->ipc_enabled && enable) + DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); + dev_priv->wm.distrust_bios_wm = true; + dev_priv->ipc_enabled = enable; + intel_enable_ipc(dev_priv); + } return len; } @@ -4130,16 +4116,13 @@ i915_cache_sharing_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; intel_wakeref_t wakeref; - u32 snpcr; + u32 snpcr = 0; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -4151,7 +4134,6 @@ i915_cache_sharing_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; intel_wakeref_t wakeref; - u32 snpcr; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; @@ -4159,16 +4141,17 @@ i915_cache_sharing_set(void *data, u64 val) if (val > 3) return -EINVAL; - wakeref = intel_runtime_pm_get(dev_priv); DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 snpcr; + + /* Update the cache sharing policy here as well */ + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= val << GEN6_MBC_SNPCR_SHIFT; + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + } - /* Update the cache sharing policy here as well */ - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - snpcr &= ~GEN6_MBC_SNPCR_MASK; - snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - - intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -4405,20 +4388,17 @@ static int i915_sseu_status(struct seq_file *m, void *unused) sseu.max_eus_per_subslice = RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; - wakeref = intel_runtime_pm_get(dev_priv); - - if (IS_CHERRYVIEW(dev_priv)) { - cherryview_sseu_device_status(dev_priv, &sseu); - } else if (IS_BROADWELL(dev_priv)) { - broadwell_sseu_device_status(dev_priv, &sseu); - } else if (IS_GEN(dev_priv, 9)) { - gen9_sseu_device_status(dev_priv, &sseu); - } else if (INTEL_GEN(dev_priv) >= 10) { - gen10_sseu_device_status(dev_priv, &sseu); + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_sseu_device_status(dev_priv, &sseu); + else if (IS_BROADWELL(dev_priv)) + broadwell_sseu_device_status(dev_priv, &sseu); + else if (IS_GEN(dev_priv, 9)) + gen9_sseu_device_status(dev_priv, &sseu); + else if (INTEL_GEN(dev_priv) >= 10) + gen10_sseu_device_status(dev_priv, &sseu); } - intel_runtime_pm_put(dev_priv, wakeref); - i915_print_sseu_info(m, false, &sseu); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3186859ff378..f5e2456c4f73 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -813,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); - wakeref = intel_runtime_pm_get(dev_priv); - spin_lock_irq(&dev_priv->uncore.lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); + POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put(dev_priv, wakeref); + 
spin_unlock_irq(&dev_priv->uncore.lock); + } } static void diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e2c61633e95d..dbea14bf67cc 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2535,9 +2535,8 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - wakeref = intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2556,9 +2555,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); } static int aliasing_gtt_bind_vma(struct i915_vma *vma, @@ -2592,9 +2590,10 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_GLOBAL_BIND) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + vma->vm->insert_entries(vma->vm, vma, + cache_level, pte_flags); + } } return 0; @@ -2605,11 +2604,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; if (vma->flags & I915_VMA_GLOBAL_BIND) { + struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vm->clear_range(vm, vma->node.start, vma->size); } if (vma->flags & I915_VMA_LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f01489b05b5e..8ceecb026910 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -296,14 +296,14 @@ i915_gem_shrink(struct drm_i915_private *i915, unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - unsigned long freed; + unsigned long freed = 0; - wakeref = intel_runtime_pm_get(i915); - freed = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + freed = i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); + } return freed; } @@ -376,14 +376,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - freed += i915_gem_shrink(i915, - sc->nr_to_scan - sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_ACTIVE | - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + freed += i915_gem_shrink(i915, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, + I915_SHRINK_ACTIVE | + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); + } } shrinker_unlock(i915, unlock); @@ -400,11 +400,11 @@ 
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) unsigned long unevictable, bound, unbound, freed_pages; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - freed_pages = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915, wakeref); + freed_pages = 0; + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -454,12 +454,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr MAX_SCHEDULE_TIMEOUT)) goto out; - wakeref = intel_runtime_pm_get(i915); - freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_VMAPS); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_VMAPS); /* We also want to clear any cached iomaps as they wrap vmap */ list_for_each_entry_safe(vma, next, diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 3d43fc9dd25d..b1cb2d3cae16 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -230,14 +230,11 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) val = dev_priv->gt_pm.rps.cur_freq; if (dev_priv->gt.awake) { - intel_wakeref_t wakeref = - intel_runtime_pm_get_if_in_use(dev_priv); + intel_wakeref_t wakeref; - if (wakeref) { + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) val = intel_get_cagf(dev_priv, I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put(dev_priv, wakeref); - } } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2cbbf165d179..41313005af42 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -43,11 +43,10 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, i915_reg_t reg) { intel_wakeref_t wakeref; - u64 res; + u64 res = 0; - wakeref = intel_runtime_pm_get(dev_priv); - res = intel_rc6_residency_us(dev_priv, reg); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + res = intel_rc6_residency_us(dev_priv, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a1e4e1033289..71377ec49a10 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -2187,6 +2187,14 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); +#define with_intel_runtime_pm(i915, wf) \ + for ((wf) = intel_runtime_pm_get(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + +#define with_intel_runtime_pm_if_in_use(i915, wf) \ + for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); diff --git a/drivers/gpu/drm/i915/intel_guc_log.c 
b/drivers/gpu/drm/i915/intel_guc_log.c index 20c0b36d748e..b53582c0c6c1 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -444,9 +444,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log) * Generally device is expected to be active only at this * time, so get/put should be really quick. */ - wakeref = intel_runtime_pm_get(dev_priv); - guc_action_flush_log_complete(guc); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + guc_action_flush_log_complete(guc); } int intel_guc_log_create(struct intel_guc_log *log) @@ -507,7 +506,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); intel_wakeref_t wakeref; - int ret; + int ret = 0; BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); GEM_BUG_ON(!log->vma); @@ -521,16 +520,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) mutex_lock(&dev_priv->drm.struct_mutex); - if (log->level == level) { - ret = 0; + if (log->level == level) goto out_unlock; - } - wakeref = intel_runtime_pm_get(dev_priv); - ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), - GUC_LOG_LEVEL_IS_ENABLED(level), - GUC_LOG_LEVEL_TO_VERBOSITY(level)); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + ret = guc_action_control_log(guc, + GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; @@ -611,9 +608,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) */ flush_work(&log->relay.flush_work); - wakeref = intel_runtime_pm_get(i915); - guc_action_flush_log(guc); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 3e8c18b6a42d..9bd1c9002c2a 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -116,14 +116,13 @@ int intel_huc_check_status(struct intel_huc *huc) { struct drm_i915_private *dev_priv = huc_to_i915(huc); intel_wakeref_t wakeref; - bool status; + bool status = false; if (!HAS_HUC(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; return status; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 93a2e4b5c54c..5a39a6347a7a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1204,17 +1204,19 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; - u32 hw_level; - int ret; + int ret = 0; - wakeref = intel_runtime_pm_get(dev_priv); - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 hw_level; - hw_level = intel_panel_get_backlight(connector); - ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - 
drm_modeset_unlock(&dev->mode_config.connection_mutex); - intel_runtime_pm_put(dev_priv, wakeref); + hw_level = intel_panel_get_backlight(connector); + ret = scale_hw_to_user(connector, + hw_level, bd->props.max_brightness); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } return ret; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index d494d92da02c..681ea532585e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1696,21 +1696,21 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - wakeref = intel_runtime_pm_get(dev_priv); - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = I915_READ64_2x32(entry->offset_ldw, - entry->offset_udw); - else if (entry->size == 8 && flags == 0) - reg->val = I915_READ64(entry->offset_ldw); - else if (entry->size == 4 && flags == 0) - reg->val = I915_READ(entry->offset_ldw); - else if (entry->size == 2 && flags == 0) - reg->val = I915_READ16(entry->offset_ldw); - else if (entry->size == 1 && flags == 0) - reg->val = I915_READ8(entry->offset_ldw); - else - ret = -EINVAL; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else + ret = -EINVAL; + } return ret; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 01a46c46fe25..e77b7ed449ae 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -98,26 +98,22 @@ static void pm_suspend(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - i915_gem_suspend_late(i915); - - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + } } static void pm_hibernate(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - - i915_gem_freeze(i915); - i915_gem_freeze_late(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); - intel_runtime_pm_put(i915, wakeref); + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + } } static void pm_resume(struct drm_i915_private *i915) @@ -128,13 +124,11 @@ static void pm_resume(struct drm_i915_private *i915) * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. 
*/ - wakeref = intel_runtime_pm_get(i915); - - intel_engines_sanitize(i915, false); - i915_gem_sanitize(i915); - i915_gem_resume(i915); - - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + intel_engines_sanitize(i915, false); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + } } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 7a9b1f20b019..4cba50679607 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -610,9 +610,9 @@ static int igt_ctx_exec(void *arg) } } - wakeref = intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915, wakeref); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -718,9 +718,9 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - wakeref = intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915, wakeref); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index bbcbf11c72b3..067e5dfa0a24 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -484,18 +484,16 @@ int i915_gem_evict_mock_selftests(void) }; struct drm_i915_private *i915; intel_wakeref_t wakeref; - int err; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); - err = i915_subtests(tests, i915); - - intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 3575e1387c3f..395ae878e0f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -509,9 +509,8 @@ static void disable_retire_worker(struct drm_i915_private *i915) if (!i915->gt.active_requests++) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - i915_gem_unpark(i915); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + i915_gem_unpark(i915); } mutex_unlock(&i915->drm.struct_mutex); @@ -593,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } + err = 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - err = make_obj_busy(obj); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + err = make_obj_busy(obj); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9f705ff9423f..2e14d6d3bad7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -256,17 +256,15 @@ int 
i915_request_mock_selftests(void) }; struct drm_i915_private *i915; intel_wakeref_t wakeref; - int err; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; - wakeref = intel_runtime_pm_get(i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); - err = i915_subtests(tests, i915); - - intel_runtime_pm_put(i915, wakeref); drm_dev_put(&i915->drm); return err; diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 75324b6249e3..9009d7b8b136 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -93,9 +93,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - wakeref = intel_runtime_pm_get(engine->i915); - rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915, wakeref); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) + rq = i915_request_alloc(engine, ctx); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -236,14 +236,15 @@ switch_to_scratch_context(struct intel_engine_cs *engine, if (IS_ERR(ctx)) return PTR_ERR(ctx); - wakeref = intel_runtime_pm_get(engine->i915); - - if (spin) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - else - rq = i915_request_alloc(engine, ctx); - - intel_runtime_pm_put(engine->i915, wakeref); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) { + if (spin) + rq = igt_spinner_create_request(spin, + ctx, engine, + MI_NOOP); + else + rq = i915_request_alloc(engine, ctx); + } kernel_context_close(ctx); @@ -301,9 +302,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out; - wakeref = intel_runtime_pm_get(i915); - err = reset(engine); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + err = reset(engine); if (want_spin) { igt_spinner_end(&spin); -- cgit v1.2.3 From 0e6e0be4c952372cc4c3f30bb8ddf9451f314503 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:24 +0000 Subject: drm/i915: Markup paired operations on display power domains The majority of runtime-pm operations are bounded and scoped within a function; these are easy to verify that the wakeref are handled correctly. We can employ the compiler to help us, and reduce the number of wakerefs tracked when debugging, by passing around cookies provided by the various rpm_get functions to their rpm_put counterpart. This makes the pairing explicit, and given the required wakeref cookie the compiler can verify that we pass an initialised value to the rpm_put (quite handy for double checking error paths). 
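As a sketch of the calling convention this introduces for display power domains (the helper below is made up for illustration and is not from this patch), the cookie handed out by the get side is now threaded through to the matching put:

/*
 * Illustrative only: update_pipe_thing() is a hypothetical helper showing
 * the new get/put shape for display power domains.
 */
static void update_pipe_thing(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);
        intel_wakeref_t wakeref;

        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return;         /* power well is off, nothing to update */

        /* ... access the pipe's registers here ... */

        /* The put now takes the cookie returned by the matching get. */
        intel_display_power_put(dev_priv, power_domain, wakeref);
}
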
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-16-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 35 +++++++++------- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem.c | 4 +- drivers/gpu/drm/i915/icl_dsi.c | 36 +++++++++++------ drivers/gpu/drm/i915/intel_audio.c | 3 +- drivers/gpu/drm/i915/intel_cdclk.c | 10 +++-- drivers/gpu/drm/i915/intel_crt.c | 25 +++++++----- drivers/gpu/drm/i915/intel_csr.c | 25 +++++++++--- drivers/gpu/drm/i915/intel_ddi.c | 36 ++++++++++------- drivers/gpu/drm/i915/intel_display.c | 68 ++++++++++++++++++++----------- drivers/gpu/drm/i915/intel_dp.c | 38 ++++++++++-------- drivers/gpu/drm/i915/intel_dpll_mgr.c | 66 ++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_drv.h | 17 ++++++-- drivers/gpu/drm/i915/intel_dsi.h | 1 + drivers/gpu/drm/i915/intel_hdmi.c | 18 +++++---- drivers/gpu/drm/i915/intel_i2c.c | 20 +++++----- drivers/gpu/drm/i915/intel_lvds.c | 8 ++-- drivers/gpu/drm/i915/intel_pipe_crc.c | 6 ++- drivers/gpu/drm/i915/intel_pm.c | 6 ++- drivers/gpu/drm/i915/intel_runtime_pm.c | 71 +++++++++++++++++++++------------ drivers/gpu/drm/i915/intel_sprite.c | 24 +++++++---- drivers/gpu/drm/i915/intel_vdsc.c | 4 +- drivers/gpu/drm/i915/vlv_dsi.c | 14 ++++--- 23 files changed, 347 insertions(+), 190 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1c7913b40bb7..e846608ee6aa 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -626,10 +626,12 @@ static void gen8_display_interrupt_info(struct seq_file *m) for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!wakeref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -644,7 +646,7 @@ static void gen8_display_interrupt_info(struct seq_file *m) pipe_name(pipe), I915_READ(GEN8_DE_PIPE_IER(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", @@ -680,6 +682,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) wakeref = intel_runtime_pm_get(dev_priv); if (IS_CHERRYVIEW(dev_priv)) { + intel_wakeref_t pref; + seq_printf(m, "Master Interrupt Control:\t%08x\n", I915_READ(GEN8_MASTER_IRQ)); @@ -695,8 +699,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) enum intel_display_power_domain power_domain; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + pref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!pref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -706,17 +711,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data) pipe_name(pipe), I915_READ(PIPESTAT(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, pref); } - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); seq_printf(m, "Port hotplug:\t%08x\n", I915_READ(PORT_HOTPLUG_EN)); seq_printf(m, "DPFLIPSTAT:\t%08x\n", I915_READ(VLV_DPFLIPSTAT)); 
seq_printf(m, "DPINVGTT:\t%08x\n", I915_READ(DPINVGTT)); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); for (i = 0; i < 4; i++) { seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", @@ -779,10 +784,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(VLV_IMR)); for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; + intel_wakeref_t pref; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) { + pref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + if (!pref) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -791,7 +798,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Pipe %c stat:\t%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, pref); } seq_printf(m, "Master IER:\t%08x\n", @@ -1709,8 +1716,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) intel_wakeref_t wakeref; bool sr_enabled = false; - wakeref = intel_runtime_pm_get(dev_priv); - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); if (INTEL_GEN(dev_priv) >= 9) /* no global SR status; inspect per-plane WM */; @@ -1726,8 +1732,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_runtime_pm_put(dev_priv, wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b6d0cd890a19..0aedbb88eb5b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -344,6 +344,7 @@ struct intel_csr { uint32_t mmiodata[8]; uint32_t dc_state; uint32_t allowed_dc_mask; + intel_wakeref_t wakeref; }; enum i915_cache_level { @@ -1982,6 +1983,7 @@ struct drm_i915_private { * is a slight delay before we do so. */ intel_wakeref_t awake; + intel_wakeref_t power; /** * The number of times we have woken up. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f5e2456c4f73..abbca28f4cff 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -176,7 +176,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) if (INTEL_GEN(i915) >= 6) gen6_rps_idle(i915); - intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ); + intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, i915->gt.power); intel_runtime_pm_put(i915, wakeref); @@ -221,7 +221,7 @@ void i915_gem_unpark(struct drm_i915_private *i915) * Work around it by grabbing a GT IRQ power domain whilst there is any * GT activity, preventing any DC state transitions. 
*/ - intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + i915->gt.power = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ i915->gt.epoch = 1; diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 4dd793b78996..f3a5f03646ce 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) } for_each_dsi_port(port, intel_dsi->ports) { - intel_display_power_get(dev_priv, port == PORT_A ? - POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO); + intel_dsi->io_wakeref[port] = + intel_display_power_get(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); } } @@ -1125,10 +1127,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) enum port port; u32 tmp; - intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO); - - if (intel_dsi->dual_link) - intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO); + for_each_dsi_port(port, intel_dsi->ports) { + intel_wakeref_t wakeref; + + wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]); + if (wakeref) { + intel_display_power_put(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO, + wakeref); + } + } /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) { @@ -1229,13 +1239,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u32 tmp; - enum port port; enum transcoder dsi_trans; + intel_wakeref_t wakeref; + enum port port; bool ret = false; + u32 tmp; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; for_each_dsi_port(port, intel_dsi->ports) { @@ -1260,7 +1272,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, ret = tmp & PIPECONF_ENABLE; } out: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 202a58cf2d9f..de26cd0a5497 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -748,7 +748,8 @@ static void i915_audio_component_get_power(struct device *kdev) static void i915_audio_component_put_power(struct device *kdev) { - intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO); + intel_display_power_put_unchecked(kdev_to_i915(kdev), + POWER_DOMAIN_AUDIO); } static void i915_audio_component_codec_wake_override(struct device *kdev, diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 2021e484a287..73cb7250118e 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -520,6 +520,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; u32 val, cmd = cdclk_state->voltage_level; + intel_wakeref_t wakeref; switch (cdclk) { case 400000: @@ -539,7 +540,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, * a system suspend. So grab the PIPE-A domain, which covers * the HW blocks needed for the following programming. 
*/ - intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); @@ -593,7 +594,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -601,6 +602,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; u32 val, cmd = cdclk_state->voltage_level; + intel_wakeref_t wakeref; switch (cdclk) { case 333333: @@ -619,7 +621,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, * a system suspend. So grab the PIPE-A domain, which covers * the HW blocks needed for the following programming. */ - intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); @@ -637,7 +639,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); } static int bdw_calc_cdclk(int min_cdclk) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 951e9bae6921..33bd2addcbdd 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -83,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -776,6 +778,7 @@ intel_crt_detect(struct drm_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; + intel_wakeref_t wakeref; int status, ret; struct intel_load_detect_pipe tmp; @@ -784,7 +787,8 @@ intel_crt_detect(struct drm_connector *connector, force); if (i915_modparams.load_detect_test) { - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); goto load_detect; } @@ -792,7 +796,8 @@ intel_crt_detect(struct drm_connector *connector, if (dmi_check_system(intel_spurious_crt_detect)) return connector_status_disconnected; - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); if (I915_HAS_HOTPLUG(dev_priv)) { /* We can not rely on the HPD pin always being correctly wired @@ -847,7 +852,7 @@ load_detect: } out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return status; } @@ -857,10 +862,12 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct 
drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(connector); struct intel_encoder *intel_encoder = &crt->base; - int ret; + intel_wakeref_t wakeref; struct i2c_adapter *i2c; + int ret; - intel_display_power_get(dev_priv, intel_encoder->power_domain); + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); @@ -872,7 +879,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) ret = intel_crt_ddc_get_modes(connector, i2c); out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index a516697bf57d..ea5fb64d33dd 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return memcpy(dmc_payload, &fw->data[readcount], nbytes); } +static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv) +{ + WARN_ON(dev_priv->csr.wakeref); + dev_priv->csr.wakeref = + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); +} + +static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv) +{ + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&dev_priv->csr.wakeref); + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); +} + static void csr_load_work_fn(struct work_struct *work) { struct drm_i915_private *dev_priv; @@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work) if (dev_priv->csr.dmc_payload) { intel_csr_load_program(dev_priv); - - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_put(dev_priv); DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n", dev_priv->csr.fw_path, @@ -467,7 +481,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) * suspend as runtime suspend *requires* a working CSR for whatever * reason. */ - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_get(dev_priv); if (INTEL_GEN(dev_priv) >= 12) { /* Allow to load fw via parameter using the last known size */ @@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv) /* Drop the reference held in case DMC isn't loaded. */ if (!dev_priv->csr.dmc_payload) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_put(dev_priv); } /** @@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv) * loaded. 
*/ if (!dev_priv->csr.dmc_payload) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_csr_runtime_pm_get(dev_priv); } /** @@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) return; intel_csr_ucode_suspend(dev_priv); + WARN_ON(dev_priv->csr.wakeref); kfree(dev_priv->csr.dmc_payload); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 2d6ed990a232..7f3cd055de50 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1860,12 +1860,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + intel_wakeref_t wakeref; enum pipe pipe = 0; int ret = 0; uint32_t tmp; - if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv, - intel_encoder->power_domain))) + wakeref = intel_display_power_get_if_enabled(dev_priv, + intel_encoder->power_domain); + if (WARN_ON(!wakeref)) return -ENXIO; if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) { @@ -1880,7 +1882,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, tmp &= ~TRANS_DDI_HDCP_SIGNALLING; I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp); out: - intel_display_power_put(dev_priv, intel_encoder->power_domain); + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -1891,13 +1893,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) struct intel_encoder *encoder = intel_connector->encoder; int type = intel_connector->base.connector_type; enum port port = encoder->port; - enum pipe pipe = 0; enum transcoder cpu_transcoder; + intel_wakeref_t wakeref; + enum pipe pipe = 0; uint32_t tmp; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; if (!encoder->get_hw_state(encoder, &pipe)) { @@ -1939,7 +1943,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) } out: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -1950,6 +1954,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = encoder->port; + intel_wakeref_t wakeref; enum pipe p; u32 tmp; u8 mst_pipe_mask; @@ -1957,8 +1962,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, *pipe_mask = 0; *is_dp_mst = false; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return; tmp = I915_READ(DDI_BUF_CTL(port)); @@ -2029,7 +2035,7 @@ out: "(PHY_CTL %08x)\n", port_name(port), tmp); } - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, @@ -3286,7 +3292,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); - intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); } @@ -3306,7 +3313,8 @@ static void 
intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, intel_disable_ddi_buf(encoder, old_crtc_state); - intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); @@ -3626,8 +3634,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, if (intel_crtc_has_dp_encoder(crtc_state) || intel_port_is_tc(dev_priv, encoder->port)) - intel_display_power_put(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); + intel_display_power_put_unchecked(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); } void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b0b8f9ffd873..36c56d1637b8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1197,17 +1197,19 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) state = true; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (wakeref) { u32 val = I915_READ(PIPECONF(cpu_transcoder)); cur_state = !!(val & PIPECONF_ENABLE); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } else { cur_state = false; } @@ -3412,6 +3414,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + intel_wakeref_t wakeref; bool ret; u32 val; @@ -3421,7 +3424,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, * display power wells. 
*/ power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; val = I915_READ(DSPCNTR(i9xx_plane)); @@ -3434,7 +3438,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> DISPPLANE_SEL_PIPE_SHIFT; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -6107,7 +6111,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain; for_each_power_domain(domain, domains) - intel_display_power_put(dev_priv, domain); + intel_display_power_put_unchecked(dev_priv, domain); } static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, @@ -6354,7 +6358,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, domains = intel_crtc->enabled_power_domains; for_each_power_domain(domain, domains) - intel_display_power_put(dev_priv, domain); + intel_display_power_put_unchecked(dev_priv, domain); intel_crtc->enabled_power_domains = 0; dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); @@ -7966,11 +7970,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; uint32_t tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -8071,7 +8077,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ret = true; out: - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -9038,11 +9044,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; uint32_t tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -9125,7 +9133,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ret = true; out: - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -9734,7 +9742,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, out: for_each_power_domain(power_domain, power_domain_mask) - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put_unchecked(dev_priv, power_domain); return active; } @@ -9984,17 +9992,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(PIPE_A); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; *pipe = PIPE_A; - 
intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -10217,6 +10227,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; u32 val; @@ -10226,7 +10237,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, * display power wells. */ power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; val = I915_READ(CURCNTR(plane->pipe)); @@ -10239,7 +10251,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> MCURSOR_PIPE_SELECT_SHIFT; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -12950,6 +12962,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc *crtc; struct intel_crtc *intel_crtc; u64 put_domains[I915_MAX_PIPES] = {}; + intel_wakeref_t wakeref = 0; int i; intel_atomic_commit_fence_wait(intel_state); @@ -12957,7 +12970,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_dependencies(state); if (intel_state->modeset) - intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); @@ -13094,7 +13107,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * the culprit. */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); + intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } /* @@ -15496,19 +15509,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) void i915_redisable_vga(struct drm_i915_private *dev_priv) { - /* This function can be called both from intel_modeset_setup_hw_state or + intel_wakeref_t wakeref; + + /* + * This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very * paranoid "someone might have enabled VGA while we were not looking" * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy - * the rest of the driver uses. */ - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) + * the rest of the driver uses. 
+ */ + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_VGA); + if (!wakeref) return; i915_redisable_vga_power_on(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); + intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref); } /* FIXME read out full plane state for all planes */ @@ -15808,12 +15827,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; struct intel_encoder *encoder; + struct intel_crtc *crtc; + intel_wakeref_t wakeref; int i; - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_early_display_was(dev_priv); intel_modeset_readout_hw_state(dev); @@ -15883,7 +15903,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, modeset_put_power_domains(dev_priv, put_domains); } - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); intel_fbc_init_pipe_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d3cd40e656fe..fc85fd77a661 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -621,8 +621,8 @@ static void pps_unlock(struct intel_dp *intel_dp) mutex_unlock(&dev_priv->pps_mutex); - intel_display_power_put(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); + intel_display_power_put_unchecked(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); } static void @@ -2511,8 +2511,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); - intel_display_power_put(dev_priv, - intel_aux_power_domain(intel_dig_port)); + intel_display_power_put_unchecked(dev_priv, + intel_aux_power_domain(intel_dig_port)); } static void edp_panel_vdd_work(struct work_struct *__work) @@ -2657,7 +2657,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) intel_dp->panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. 
*/ - intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port)); + intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); } void intel_edp_panel_off(struct intel_dp *intel_dp) @@ -2983,16 +2983,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, encoder->port, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -5365,12 +5367,13 @@ intel_dp_detect(struct drm_connector *connector, enum drm_connector_status status; enum intel_display_power_domain aux_domain = intel_aux_power_domain(dig_port); + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); - intel_display_power_get(dev_priv, aux_domain); + wakeref = intel_display_power_get(dev_priv, aux_domain); /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) @@ -5436,7 +5439,7 @@ intel_dp_detect(struct drm_connector *connector, ret = intel_dp_retrain_link(encoder, ctx); if (ret) { - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); return ret; } } @@ -5460,7 +5463,7 @@ out: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); return status; } @@ -5473,6 +5476,7 @@ intel_dp_force(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); enum intel_display_power_domain aux_domain = intel_aux_power_domain(dig_port); + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -5481,11 +5485,11 @@ intel_dp_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - intel_display_power_get(dev_priv, aux_domain); + wakeref = intel_display_power_get(dev_priv, aux_domain); intel_dp_set_edid(intel_dp); - intel_display_power_put(dev_priv, aux_domain); + intel_display_power_put(dev_priv, aux_domain, wakeref); } static int intel_dp_get_modes(struct drm_connector *connector) @@ -5931,6 +5935,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum irqreturn ret = IRQ_NONE; + intel_wakeref_t wakeref; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { /* @@ -5953,8 +5958,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) return IRQ_NONE; } - intel_display_power_get(dev_priv, - intel_aux_power_domain(intel_dig_port)); + wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(intel_dig_port)); if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { @@ -5984,7 +5989,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) put_power: intel_display_power_put(dev_priv, - intel_aux_power_domain(intel_dig_port)); + 
intel_aux_power_domain(intel_dig_port), + wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index d513ca875c67..04870e960537 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; uint32_t val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(PCH_DPLL(id)); @@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->fp0 = I915_READ(PCH_FP0(id)); hw_state->fp1 = I915_READ(PCH_FP1(id)); - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & DPLL_VCO_ENABLE; } @@ -509,15 +512,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; uint32_t val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(WRPLL_CTL(id)); hw_state->wrpll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & WRPLL_PLL_ENABLE; } @@ -526,15 +532,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { + intel_wakeref_t wakeref; uint32_t val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(SPLL_CTL); hw_state->spll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return val & SPLL_PLL_ENABLE; } @@ -989,9 +998,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, uint32_t val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1011,7 +1023,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -1020,12 +1032,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - uint32_t val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; + uint32_t val; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1041,7 +1056,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, ret = 
true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -1579,14 +1594,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - uint32_t val; - bool ret; + intel_wakeref_t wakeref; enum dpio_phy phy; enum dpio_channel ch; + uint32_t val; + bool ret; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -1643,7 +1661,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -2091,10 +2109,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; + intel_wakeref_t wakeref; uint32_t val; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; ret = false; @@ -2113,7 +2134,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } @@ -2950,11 +2971,14 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; - enum port port; + intel_wakeref_t wakeref; bool ret = false; + enum port port; + uint32_t val; - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) return false; val = I915_READ(icl_pll_id_to_enable_reg(id)); @@ -3007,7 +3031,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 71377ec49a10..5e5ceec7c004 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -2118,12 +2118,21 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, +intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain 
domain); + enum intel_display_power_domain domain, + intel_wakeref_t wakeref); +#else +#define intel_display_power_put(i915, domain, wakeref) \ + intel_display_power_put_unchecked(i915, domain) +#endif void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices); diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index fc7a09049f81..df3d390e25fe 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -39,6 +39,7 @@ struct intel_dsi { struct intel_encoder base; struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS]; + intel_wakeref_t io_wakeref[I915_MAX_PORTS]; /* GPIO Desc for CRC based Panel control */ struct gpio_desc *gpio_panel; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 14a0c28fe7c1..14727ac06f67 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1190,15 +1190,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } @@ -1895,11 +1897,12 @@ intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + intel_wakeref_t wakeref; struct edid *edid; bool connected = false; struct i2c_adapter *i2c; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); @@ -1914,7 +1917,7 @@ intel_hdmi_set_edid(struct drm_connector *connector) intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); to_intel_connector(connector)->detect_edid = edid; if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { @@ -1939,11 +1942,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; + intel_wakeref_t wakeref; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (IS_ICELAKE(dev_priv) && !intel_digital_port_connected(encoder)) @@ -1955,7 +1959,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) status = connector_status_connected; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); if (status != connector_status_connected) cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c6159aff9dc8..4f6dc8c94634 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ 
-697,12 +697,13 @@ out: static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { - struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, - adapter); + struct intel_gmbus *bus = + container_of(adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; + intel_wakeref_t wakeref; int ret; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); @@ -714,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); return ret; } int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { - struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, - adapter); + struct intel_gmbus *bus = + container_of(adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int ret; u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; struct i2c_msg msgs[] = { @@ -741,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) .buf = buf, } }; + intel_wakeref_t wakeref; + int ret; - intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); mutex_lock(&dev_priv->gmbus_mutex); /* @@ -753,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); mutex_unlock(&dev_priv->gmbus_mutex); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6adcc8d037bf..b01aacb5d73d 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -94,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + intel_wakeref_t wakeref; bool ret; - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index bdabcfab8090..56d614b02302 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -589,6 +589,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; + intel_wakeref_t wakeref; u32 val = 0; /* shut up gcc */ int ret = 0; @@ -598,7 +599,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) } power_domain = POWER_DOMAIN_PIPE(crtc->index); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) { 
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); return -EIO; } @@ -624,7 +626,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) pipe_crc->skipped = 0; out: - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 83b01cde8113..ab7257720c7e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3989,10 +3989,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; enum plane_id plane_id; power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return; for_each_plane_id_on_crtc(crtc, plane_id) @@ -4001,7 +4003,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, &ddb_y[plane_id], &ddb_uv[plane_id]); - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); } void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index c29577d7a35a..6aeceab37000 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1855,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. */ -void intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - - intel_runtime_pm_get(dev_priv); + intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); mutex_unlock(&power_domains->lock); + + return wakeref; } /** @@ -1881,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. 
*/ -bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + intel_wakeref_t wakeref; bool is_enabled; - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) return false; mutex_lock(&power_domains->lock); @@ -1901,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - if (!is_enabled) - intel_runtime_pm_put_unchecked(dev_priv); + if (!is_enabled) { + intel_runtime_pm_put(dev_priv, wakeref); + wakeref = 0; + } - return is_enabled; + return wakeref; } -/** - * intel_display_power_put - release a power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function drops the power domain reference obtained by - * intel_display_power_get() and might power down the corresponding hardware - * block right away if this is the last reference. - */ -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +static void __intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) { struct i915_power_domains *power_domains; struct i915_power_well *power_well; @@ -1935,10 +1932,34 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_power_well_put(dev_priv, power_well); mutex_unlock(&power_domains->lock); +} +/** + * intel_display_power_put - release a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. + */ +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + __intel_display_power_put(dev_priv, domain); intel_runtime_pm_put_unchecked(dev_priv); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put(dev_priv, wakeref); +} +#endif + #define I830_PIPES_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ @@ -4048,7 +4069,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) /* Remove the refcount we took to keep power well support disabled. 
*/ if (!i915_modparams.disable_power_well) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_verify_state(dev_priv); } @@ -4067,7 +4088,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) */ void intel_power_domains_enable(struct drm_i915_private *dev_priv) { - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_verify_state(dev_priv); } @@ -4102,7 +4123,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); /* * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 @@ -4123,7 +4144,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * power wells if power domains must be deinitialized for suspend. */ if (!i915_modparams.disable_power_well) { - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_verify_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8f3982c03925..87a06fcca284 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -618,17 +618,19 @@ skl_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -882,17 +884,19 @@ vlv_plane_get_hw_state(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -1051,17 +1055,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } @@ -1217,17 +1223,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane, { 
struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return false; ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE; *pipe = plane->pipe; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c index 48537827616f..23abf03736e7 100644 --- a/drivers/gpu/drm/i915/intel_vdsc.c +++ b/drivers/gpu/drm/i915/intel_vdsc.c @@ -1082,6 +1082,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) I915_WRITE(dss_ctl2_reg, dss_ctl2_val); /* Disable Power wells for VDSC/joining */ - intel_display_power_put(dev_priv, - intel_dsc_power_domain(old_crtc_state)); + intel_display_power_put_unchecked(dev_priv, + intel_dsc_power_domain(old_crtc_state)); } diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index bb1287020f80..d116fead8514 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -959,13 +959,15 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + intel_wakeref_t wakeref; enum port port; bool active = false; DRM_DEBUG_KMS("\n"); - if (!intel_display_power_get_if_enabled(dev_priv, - encoder->power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) return false; /* @@ -1021,7 +1023,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, } out_put_power: - intel_display_power_put(dev_priv, encoder->power_domain); + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return active; } @@ -1574,6 +1576,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) enum drm_panel_orientation orientation; struct intel_plane *plane; struct intel_crtc *crtc; + intel_wakeref_t wakeref; enum pipe pipe; u32 val; @@ -1584,7 +1587,8 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) plane = to_intel_plane(crtc->base.primary); power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; val = I915_READ(DSPCNTR(plane->i9xx_plane)); @@ -1596,7 +1600,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) else orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; - intel_display_power_put(dev_priv, power_domain); + intel_display_power_put(dev_priv, power_domain, wakeref); return orientation; } -- cgit v1.2.3 From 25c896bdb8dc8b90e8f4d477185780596fe42bbe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:25 +0000 Subject: drm/i915: Track the wakeref used to initialise display power domains On module load and unload, we grab the POWER_DOMAIN_INIT powerwells and transfer them to the runtime-pm code. We can use our wakeref tracking to verify that the wakeref is indeed passed from init to enable, and disable to fini; and across suspend. 
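The handoff itself is a two-step pattern: init_hw stores the wakeref returned by intel_display_power_get() in the power_domains struct, and whichever stage later drops that reference fetches-and-zeroes it first, so the tracking can flag a missing or double put. A simplified sketch of the pattern the patch below implements (the extra !disable_power_well reference and the verify_state calls are omitted here):

	void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
	{
		struct i915_power_domains *power_domains = &i915->power_domains;

		/* ... platform-specific display core init ... */

		/* Hold POWER_DOMAIN_INIT until display readout completes. */
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	void intel_power_domains_enable(struct drm_i915_private *i915)
	{
		/* Take ownership of the wakeref stored at init time... */
		intel_wakeref_t wakeref __maybe_unused =
			fetch_and_zero(&i915->power_domains.wakeref);

		/* ...and release it, so any leak shows up in the tracking. */
		intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	}
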
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-17-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 3 + drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_runtime_pm.c | 151 ++++++++++++++++++-------------- 3 files changed, 88 insertions(+), 68 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e846608ee6aa..926acd5b6e5d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2699,6 +2699,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) if (!HAS_RUNTIME_PM(dev_priv)) seq_puts(m, "Runtime power management not supported\n"); + seq_printf(m, "Runtime power status: %s\n", + enableddisabled(!dev_priv->power_domains.wakeref)); + seq_printf(m, "GPU idle: %s (epoch %u)\n", yesno(!dev_priv->gt.awake), dev_priv->gt.epoch); seq_printf(m, "IRQs disabled: %s\n", diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0aedbb88eb5b..97200411dfad 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -822,6 +822,8 @@ struct i915_power_domains { bool display_core_suspended; int power_well_count; + intel_wakeref_t wakeref; + struct mutex lock; int domain_use_count[POWER_DOMAIN_NUM]; struct i915_power_well *power_wells; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6aeceab37000..79f00610860b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3997,7 +3997,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); /** * intel_power_domains_init_hw - initialize hardware power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * @resume: Called from resume code paths or not * * This function initializes the hardware power domain state and enables all @@ -4011,30 +4011,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); * intel_power_domains_enable()) and must be paired with * intel_power_domains_fini_hw(). 
*/ -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) +void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; power_domains->initializing = true; - if (IS_ICELAKE(dev_priv)) { - icl_display_core_init(dev_priv, resume); - } else if (IS_CANNONLAKE(dev_priv)) { - cnl_display_core_init(dev_priv, resume); - } else if (IS_GEN9_BC(dev_priv)) { - skl_display_core_init(dev_priv, resume); - } else if (IS_GEN9_LP(dev_priv)) { - bxt_display_core_init(dev_priv, resume); - } else if (IS_CHERRYVIEW(dev_priv)) { + if (IS_ICELAKE(i915)) { + icl_display_core_init(i915, resume); + } else if (IS_CANNONLAKE(i915)) { + cnl_display_core_init(i915, resume); + } else if (IS_GEN9_BC(i915)) { + skl_display_core_init(i915, resume); + } else if (IS_GEN9_LP(i915)) { + bxt_display_core_init(i915, resume); + } else if (IS_CHERRYVIEW(i915)) { mutex_lock(&power_domains->lock); - chv_phy_control_init(dev_priv); + chv_phy_control_init(i915); mutex_unlock(&power_domains->lock); - } else if (IS_VALLEYVIEW(dev_priv)) { + } else if (IS_VALLEYVIEW(i915)) { mutex_lock(&power_domains->lock); - vlv_cmnlane_wa(dev_priv); + vlv_cmnlane_wa(i915); mutex_unlock(&power_domains->lock); - } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7) - intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) { + intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); + } /* * Keep all power wells enabled for any dependent HW access during @@ -4042,18 +4043,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) * resources powered until display HW readout is complete. We drop * this reference in intel_power_domains_enable(). */ - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + /* Disable power support if the user asked so. */ if (!i915_modparams.disable_power_well) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - intel_power_domains_sync_hw(dev_priv); + intel_display_power_get(i915, POWER_DOMAIN_INIT); + intel_power_domains_sync_hw(i915); power_domains->initializing = false; } /** * intel_power_domains_fini_hw - deinitialize hw power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * * De-initializes the display power domain HW state. It also ensures that the * device stays powered up so that the driver can be reloaded. @@ -4062,21 +4065,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw(). */ -void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) +void intel_power_domains_fini_hw(struct drm_i915_private *i915) { - /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put_unchecked(dev_priv); + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); /* Remove the refcount we took to keep power well support disabled. */ if (!i915_modparams.disable_power_well) - intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(i915); - intel_power_domains_verify_state(dev_priv); + /* Keep the power well enabled, but cancel its rpm wakeref. 
*/ + intel_runtime_pm_put(i915, wakeref); } /** * intel_power_domains_enable - enable toggling of display power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Enable the ondemand enabling/disabling of the display power wells. Note that * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled @@ -4086,30 +4092,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) * of display HW readout (which will acquire the power references reflecting * the current HW state). */ -void intel_power_domains_enable(struct drm_i915_private *dev_priv) +void intel_power_domains_enable(struct drm_i915_private *i915) { - intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); - intel_power_domains_verify_state(dev_priv); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); + intel_power_domains_verify_state(i915); } /** * intel_power_domains_disable - disable toggling of display power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Disable the ondemand enabling/disabling of the display power wells. See * intel_power_domains_enable() for which power wells this call controls. */ -void intel_power_domains_disable(struct drm_i915_private *dev_priv) +void intel_power_domains_disable(struct drm_i915_private *i915) { - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + struct i915_power_domains *power_domains = &i915->power_domains; - intel_power_domains_verify_state(dev_priv); + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(i915); } /** * intel_power_domains_suspend - suspend power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) * * This function prepares the hardware power domain state before entering @@ -4118,12 +4130,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv) * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ -void intel_power_domains_suspend(struct drm_i915_private *dev_priv, +void intel_power_domains_suspend(struct drm_i915_private *i915, enum i915_drm_suspend_mode suspend_mode) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&power_domains->wakeref); - intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); /* * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 @@ -4132,10 +4146,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && - dev_priv->csr.dmc_payload != NULL) { - intel_power_domains_verify_state(dev_priv); + i915->csr.dmc_payload) { + intel_power_domains_verify_state(i915); return; } @@ -4144,25 +4158,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * power wells if power domains must be deinitialized for suspend. */ if (!i915_modparams.disable_power_well) { - intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_INIT); - intel_power_domains_verify_state(dev_priv); + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + intel_power_domains_verify_state(i915); } - if (IS_ICELAKE(dev_priv)) - icl_display_core_uninit(dev_priv); - else if (IS_CANNONLAKE(dev_priv)) - cnl_display_core_uninit(dev_priv); - else if (IS_GEN9_BC(dev_priv)) - skl_display_core_uninit(dev_priv); - else if (IS_GEN9_LP(dev_priv)) - bxt_display_core_uninit(dev_priv); + if (IS_ICELAKE(i915)) + icl_display_core_uninit(i915); + else if (IS_CANNONLAKE(i915)) + cnl_display_core_uninit(i915); + else if (IS_GEN9_BC(i915)) + skl_display_core_uninit(i915); + else if (IS_GEN9_LP(i915)) + bxt_display_core_uninit(i915); power_domains->display_core_suspended = true; } /** * intel_power_domains_resume - resume power domain state - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function resume the hardware power domain state during system resume. * @@ -4170,28 +4184,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * intel_power_domains_enable()) and must be paired with * intel_power_domains_suspend(). */ -void intel_power_domains_resume(struct drm_i915_private *dev_priv) +void intel_power_domains_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; if (power_domains->display_core_suspended) { - intel_power_domains_init_hw(dev_priv, true); + intel_power_domains_init_hw(i915, true); power_domains->display_core_suspended = false; } else { - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); } - intel_power_domains_verify_state(dev_priv); + intel_power_domains_verify_state(i915); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) +static void intel_power_domains_dump_info(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; struct i915_power_well *power_well; - for_each_power_well(dev_priv, power_well) { + for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%-25s %d\n", @@ -4206,7 +4222,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) /** * intel_power_domains_verify_state - verify the HW/SW state for all power wells - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Verify if the reference count of each power well matches its HW enabled * state and the total refcount of the domains it belongs to. 
This must be @@ -4214,22 +4230,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) * acquiring reference counts for any power wells in use and disabling the * ones left on by BIOS but not required by any active output. */ -static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &i915->power_domains; struct i915_power_well *power_well; bool dump_domain_info; mutex_lock(&power_domains->lock); dump_domain_info = false; - for_each_power_well(dev_priv, power_well) { + for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; int domains_count; bool enabled; - enabled = power_well->desc->ops->is_enabled(dev_priv, - power_well); + enabled = power_well->desc->ops->is_enabled(i915, power_well); if ((power_well->count || power_well->desc->always_on) != enabled) DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", @@ -4253,7 +4268,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) static bool dumped; if (!dumped) { - intel_power_domains_dump_info(dev_priv); + intel_power_domains_dump_info(i915); dumped = true; } } @@ -4263,7 +4278,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) #else -static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *i915) { } -- cgit v1.2.3 From 69d938200598dda76c70d7b3f39d19e89b15ea71 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:26 +0000 Subject: drm/i915/dp: Markup pps lock power well Track where and when we acquire and release the power well for pps access along the dp aux link, with a view to detecting if we leak any wakerefs. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-18-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_dp.c | 231 +++++++++++++++++++++------------------- 1 file changed, 121 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fc85fd77a661..0a3ac98a779e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -601,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, static void intel_dp_pps_init(struct intel_dp *intel_dp); -static void pps_lock(struct intel_dp *intel_dp) +static intel_wakeref_t +pps_lock(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; /* * See intel_power_sequencer_reset() why we need * a power domain reference here. 
*/ - intel_display_power_get(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); + wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); mutex_lock(&dev_priv->pps_mutex); + + return wakeref; } -static void pps_unlock(struct intel_dp *intel_dp) +static intel_wakeref_t +pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->pps_mutex); - - intel_display_power_put_unchecked(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp)), + wakeref); + return 0; } +#define with_pps_lock(dp, wf) \ + for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf))) + static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { @@ -973,30 +982,30 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), edp_notifier); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) return 0; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); - i915_reg_t pp_ctrl_reg, pp_div_reg; - u32 pp_div; - - pp_ctrl_reg = PP_CONTROL(pipe); - pp_div_reg = PP_DIVISOR(pipe); - pp_div = I915_READ(pp_div_reg); - pp_div &= PP_REFERENCE_DIVIDER_MASK; - - /* 0x1F write to PP_DIV_REG sets max cycle delay */ - I915_WRITE(pp_div_reg, pp_div | 0x1F); - I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF); - msleep(intel_dp->panel_power_cycle_delay); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + i915_reg_t pp_ctrl_reg, pp_div_reg; + u32 pp_div; + + pp_ctrl_reg = PP_CONTROL(pipe); + pp_div_reg = PP_DIVISOR(pipe); + pp_div = I915_READ(pp_div_reg); + pp_div &= PP_REFERENCE_DIVIDER_MASK; + + /* 0x1F write to PP_DIV_REG sets max cycle delay */ + I915_WRITE(pp_div_reg, pp_div | 0x1F); + I915_WRITE(pp_ctrl_reg, + PANEL_UNLOCK_REGS | PANEL_POWER_OFF); + msleep(intel_dp->panel_power_cycle_delay); + } } - pps_unlock(intel_dp); - return 0; } @@ -1184,16 +1193,17 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, to_i915(intel_dig_port->base.base.dev); i915_reg_t ch_ctl, ch_data[5]; uint32_t aux_clock_divider; + intel_wakeref_t wakeref; int i, ret, recv_bytes; - uint32_t status; int try, clock = 0; + uint32_t status; bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - pps_lock(intel_dp); + wakeref = pps_lock(intel_dp); /* * We will be called with VDD already enabled for dpcd/edid/oui reads. 
@@ -1337,7 +1347,7 @@ out: if (vdd) edp_panel_vdd_off(intel_dp, false); - pps_unlock(intel_dp); + pps_unlock(intel_dp, wakeref); return ret; } @@ -2464,15 +2474,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) */ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; bool vdd; if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - vdd = edp_panel_vdd_on(intel_dp); - pps_unlock(intel_dp); - + vdd = false; + with_pps_lock(intel_dp, wakeref) + vdd = edp_panel_vdd_on(intel_dp); I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", port_name(dp_to_dig_port(intel_dp)->base.port)); } @@ -2517,13 +2527,15 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) static void edp_panel_vdd_work(struct work_struct *__work) { - struct intel_dp *intel_dp = container_of(to_delayed_work(__work), - struct intel_dp, panel_vdd_work); + struct intel_dp *intel_dp = + container_of(to_delayed_work(__work), + struct intel_dp, panel_vdd_work); + intel_wakeref_t wakeref; - pps_lock(intel_dp); - if (!intel_dp->want_panel_vdd) - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + if (!intel_dp->want_panel_vdd) + edp_panel_vdd_off_sync(intel_dp); + } } static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) @@ -2613,12 +2625,13 @@ static void edp_panel_on(struct intel_dp *intel_dp) void intel_edp_panel_on(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; + if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - edp_panel_on(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_on(intel_dp); } @@ -2662,20 +2675,20 @@ static void edp_panel_off(struct intel_dp *intel_dp) void intel_edp_panel_off(struct intel_dp *intel_dp) { + intel_wakeref_t wakeref; + if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - edp_panel_off(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_off(intel_dp); } /* Enable backlight in the panel power control. */ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp; - i915_reg_t pp_ctrl_reg; + intel_wakeref_t wakeref; /* * If we enable the backlight right away following a panel power @@ -2685,17 +2698,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) */ wait_backlight_on(intel_dp); - pps_lock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; - pp = ironlake_get_pp_control(intel_dp); - pp |= EDP_BLC_ENABLE; - - pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_BLC_ENABLE; - pps_unlock(intel_dp); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } } /* Enable backlight PWM and backlight PP control. 
*/ @@ -2717,23 +2729,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp; - i915_reg_t pp_ctrl_reg; + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; - pps_lock(intel_dp); - - pp = ironlake_get_pp_control(intel_dp); - pp &= ~EDP_BLC_ENABLE; - - pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_BLC_ENABLE; - pps_unlock(intel_dp); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } intel_dp->last_backlight_off = jiffies; edp_wait_backlight_off(intel_dp); @@ -2761,12 +2771,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector, bool enable) { struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + intel_wakeref_t wakeref; bool is_enabled; - pps_lock(intel_dp); - is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; - pps_unlock(intel_dp); - + is_enabled = false; + with_pps_lock(intel_dp, wakeref) + is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; if (is_enabled == enable) return; @@ -3276,22 +3286,21 @@ static void intel_enable_dp(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); uint32_t dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; if (WARN_ON(dp_reg & DP_PORT_EN)) return; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_init_panel_power_sequencer(encoder, pipe_config); - - intel_dp_enable_port(intel_dp, pipe_config); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_init_panel_power_sequencer(encoder, pipe_config); - edp_panel_vdd_on(intel_dp); - edp_panel_on(intel_dp); - edp_panel_vdd_off(intel_dp, true); + intel_dp_enable_port(intel_dp, pipe_config); - pps_unlock(intel_dp); + edp_panel_vdd_on(intel_dp); + edp_panel_on(intel_dp); + edp_panel_vdd_off(intel_dp, true); + } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { unsigned int lane_mask = 0x0; @@ -3989,9 +3998,10 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_dp->DP = DP; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - pps_lock(intel_dp); - intel_dp->active_pipe = INVALID_PIPE; - pps_unlock(intel_dp); + intel_wakeref_t wakeref; + + with_pps_lock(intel_dp, wakeref) + intel_dp->active_pipe = INVALID_PIPE; } } @@ -5561,14 +5571,15 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder) intel_dp_mst_encoder_cleanup(intel_dig_port); if (intel_dp_is_edp(intel_dp)) { + intel_wakeref_t wakeref; + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); /* * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. 
*/ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); if (intel_dp->edp_notifier.notifier_call) { unregister_reboot_notifier(&intel_dp->edp_notifier); @@ -5590,6 +5601,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; @@ -5599,9 +5611,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) * Make sure vdd is actually turned off here. */ cancel_delayed_work_sync(&intel_dp->panel_vdd_work); - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); } static @@ -5882,6 +5893,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + intel_wakeref_t wakeref; if (!HAS_DDI(dev_priv)) intel_dp->DP = I915_READ(intel_dp->output_reg); @@ -5891,18 +5903,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) intel_dp->reset_link_params = true; - pps_lock(intel_dp); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - intel_dp->active_pipe = vlv_active_pipe(intel_dp); + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + intel_dp->active_pipe = vlv_active_pipe(intel_dp); - if (intel_dp_is_edp(intel_dp)) { - /* Reinit the power sequencer, in case BIOS did something with it. */ - intel_dp_pps_init(intel_dp); - intel_edp_panel_vdd_sanitize(intel_dp); + if (intel_dp_is_edp(intel_dp)) { + /* + * Reinit the power sequencer, in case BIOS did + * something nasty with it. + */ + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } } - - pps_unlock(intel_dp); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -6698,8 +6711,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct drm_display_mode *downclock_mode = NULL; bool has_dpcd; struct drm_display_mode *scan; - struct edid *edid; enum pipe pipe = INVALID_PIPE; + intel_wakeref_t wakeref; + struct edid *edid; if (!intel_dp_is_edp(intel_dp)) return true; @@ -6719,13 +6733,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, return false; } - pps_lock(intel_dp); - - intel_dp_init_panel_power_timestamps(intel_dp); - intel_dp_pps_init(intel_dp); - intel_edp_panel_vdd_sanitize(intel_dp); - - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) { + intel_dp_init_panel_power_timestamps(intel_dp); + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } /* Cache DPCD and EDID for edp. */ has_dpcd = intel_edp_init_dpcd(intel_dp); @@ -6810,9 +6822,8 @@ out_vdd_off: * vdd might still be enabled do to the delayed vdd off. * Make sure vdd is actually turned off here. 
*/ - pps_lock(intel_dp); - edp_panel_vdd_off_sync(intel_dp); - pps_unlock(intel_dp); + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); return false; } -- cgit v1.2.3 From 04161d64da09f73d919c6bb935a003c0fd4bc8a8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:27 +0000 Subject: drm/i915: Complain if hsw_get_pipe_config acquires the same power well twice As we only release each power well once, we assume that each transcoder maps to a different domain. Complain if this is not so. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-19-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 36c56d1637b8..7c974cf064fd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9569,6 +9569,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + + WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); *power_domain_mask |= BIT_ULL(power_domain); tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); @@ -9596,6 +9598,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) continue; + + WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); *power_domain_mask |= BIT_ULL(power_domain); /* @@ -9712,7 +9716,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { + WARN_ON(power_domain_mask & BIT_ULL(power_domain)); power_domain_mask |= BIT_ULL(power_domain); + if (INTEL_GEN(dev_priv) >= 9) skylake_get_pfit_config(crtc, pipe_config); else -- cgit v1.2.3 From 8d761e773e29f4be5c6aae50f57262afb2b83db4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:28 +0000 Subject: drm/i915: Combined gt.awake/gt.power wakerefs As the GT_IRQ power domain implies a wakeref, we can use it inplace of our existing redundant rpm grab. v2: Drop papering over forgetting to take the runtime wakeref in selftests Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-20-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 11 ++++------- drivers/gpu/drm/i915/intel_lrc.c | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 97200411dfad..fa99824f63b3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1985,7 +1985,6 @@ struct drm_i915_private { * is a slight delay before we do so. */ intel_wakeref_t awake; - intel_wakeref_t power; /** * The number of times we have woken up. 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abbca28f4cff..61037e7292ee 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -176,9 +176,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) if (INTEL_GEN(i915) >= 6) gen6_rps_idle(i915); - intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, i915->gt.power); - - intel_runtime_pm_put(i915, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); return i915->gt.epoch; } @@ -203,13 +201,11 @@ void i915_gem_unpark(struct drm_i915_private *i915) lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!i915->gt.active_requests); + assert_rpm_wakelock_held(i915); if (i915->gt.awake) return; - i915->gt.awake = intel_runtime_pm_get_noresume(i915); - GEM_BUG_ON(!i915->gt.awake); - /* * It seems that the DMC likes to transition between the DC states a lot * when there are no connected displays (no active power domains) during @@ -221,7 +217,8 @@ void i915_gem_unpark(struct drm_i915_private *i915) * Work around it by grabbing a GT IRQ power domain whilst there is any * GT activity, preventing any DC state transitions. */ - i915->gt.power = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + GEM_BUG_ON(!i915->gt.awake); if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ i915->gt.epoch = 1; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72ab89151ab9..dcb11c5f8230 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1045,7 +1045,7 @@ static void execlists_submission_tasklet(unsigned long data) GEM_TRACE("%s awake?=%d, active=%x\n", engine->name, - engine->i915->gt.awake, + !!engine->i915->gt.awake, engine->execlists.active); spin_lock_irqsave(&engine->timeline.lock, flags); -- cgit v1.2.3 From 4a8ab5ea0cde753b03bfefe4c98a8c4c61f46550 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:29 +0000 Subject: drm/i915: Mark up Ironlake ips with rpm wakerefs Currently Ironlake operates under the assumption that rpm awake (and its error checking is disabled). As such, we have missed a few places where we access registers without taking the rpm wakeref and thus trigger warnings. intel_ips being one culprit. As this involved adding a potentially sleeping rpm_get, we have to rearrange the spinlocks slightly and so switch to acquiring a device-ref under the spinlock rather than hold the spinlock for the whole operation. To be consistent, we make the change in pattern common to the intel_ips interface even though this adds a few more atomic operations than necessary in a few cases. v2: Sagar noted the mb around setting mch_dev were overkill as we only need ordering there, and that i915_emon_status was still using struct_mutex for no reason, but lacked rpm. 
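The core of the rework is how the exported i915_gpu_*() and i915_read_mch_val() entry points find and pin the device: instead of dereferencing a global under mchdev_lock, the pointer is published for RCU readers and callers take a proper device reference before touching it. A minimal sketch of that lookup (the __rcu annotation, rcu_dereference() and the explicit NULL check are spelled out here for clarity; the patch itself reads the global directly):

	static struct drm_i915_private __rcu *i915_mch_dev;

	static struct drm_i915_private *mchdev_get(void)
	{
		struct drm_i915_private *i915;

		rcu_read_lock();
		i915 = rcu_dereference(i915_mch_dev);
		/* Only hand the device out if it is still alive. */
		if (i915 && !kref_get_unless_zero(&i915->drm.ref))
			i915 = NULL;
		rcu_read_unlock();

		return i915;
	}

Every caller then pairs mchdev_get() with drm_dev_put(&i915->drm) once it is done, and i915_driver_unload() runs synchronize_rcu() so no external reader is left holding a stale pointer.
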
Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-21-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 32 +++---- drivers/gpu/drm/i915/i915_drv.c | 3 + drivers/gpu/drm/i915/intel_pm.c | 172 ++++++++++++++++++------------------ 3 files changed, 102 insertions(+), 105 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 926acd5b6e5d..37c9aff234e0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1741,32 +1741,24 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_emon_status(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - unsigned long temp, chipset, gfx; + struct drm_i915_private *i915 = node_to_i915(m->private); intel_wakeref_t wakeref; - int ret; - if (!IS_GEN(dev_priv, 5)) + if (!IS_GEN(i915, 5)) return -ENODEV; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + with_intel_runtime_pm(i915, wakeref) { + unsigned long temp, chipset, gfx; - wakeref = intel_runtime_pm_get(dev_priv); - - temp = i915_mch_val(dev_priv); - chipset = i915_chipset_val(dev_priv); - gfx = i915_gfx_val(dev_priv); - mutex_unlock(&dev->struct_mutex); + temp = i915_mch_val(i915); + chipset = i915_chipset_val(i915); + gfx = i915_gfx_val(i915); - intel_runtime_pm_put(dev_priv, wakeref); - - seq_printf(m, "GMCH temp: %ld\n", temp); - seq_printf(m, "Chipset power: %ld\n", chipset); - seq_printf(m, "GFX power: %ld\n", gfx); - seq_printf(m, "Total power: %ld\n", chipset + gfx); + seq_printf(m, "GMCH temp: %ld\n", temp); + seq_printf(m, "Chipset power: %ld\n", chipset); + seq_printf(m, "GFX power: %ld\n", gfx); + seq_printf(m, "Total power: %ld\n", chipset + gfx); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5731f992cf44..dafbbfadd1ad 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1780,6 +1780,9 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_unregister(dev_priv); + /* Flush any external code that still may be under the RCU lock */ + synchronize_rcu(); + if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ab7257720c7e..7613ae72df3d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6203,10 +6203,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv) */ DEFINE_SPINLOCK(mchdev_lock); -/* Global for IPS driver to get at the current i915 device. Protected by - * mchdev_lock. 
*/ -static struct drm_i915_private *i915_mch_dev; - bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) { u16 rgvswctl; @@ -7849,16 +7845,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - unsigned long val; + intel_wakeref_t wakeref; + unsigned long val = 0; if (!IS_GEN(dev_priv, 5)) return 0; - spin_lock_irq(&mchdev_lock); - - val = __i915_chipset_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + val = __i915_chipset_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } return val; } @@ -7935,14 +7932,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { + intel_wakeref_t wakeref; + if (!IS_GEN(dev_priv, 5)) return; - spin_lock_irq(&mchdev_lock); - - __i915_update_gfx_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + __i915_update_gfx_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } } static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) @@ -7984,18 +7983,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - unsigned long val; + intel_wakeref_t wakeref; + unsigned long val = 0; if (!IS_GEN(dev_priv, 5)) return 0; - spin_lock_irq(&mchdev_lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&mchdev_lock); + val = __i915_gfx_val(dev_priv); + spin_unlock_irq(&mchdev_lock); + } - val = __i915_gfx_val(dev_priv); + return val; +} - spin_unlock_irq(&mchdev_lock); +static struct drm_i915_private *i915_mch_dev; - return val; +static struct drm_i915_private *mchdev_get(void) +{ + struct drm_i915_private *i915; + + rcu_read_lock(); + i915 = i915_mch_dev; + if (!kref_get_unless_zero(&i915->drm.ref)) + i915 = NULL; + rcu_read_unlock(); + + return i915; } /** @@ -8006,23 +8021,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) */ unsigned long i915_read_mch_val(void) { - struct drm_i915_private *dev_priv; - unsigned long chipset_val, graphics_val, ret = 0; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) - goto out_unlock; - dev_priv = i915_mch_dev; - - chipset_val = __i915_chipset_val(dev_priv); - graphics_val = __i915_gfx_val(dev_priv); + struct drm_i915_private *i915; + unsigned long chipset_val = 0; + unsigned long graphics_val = 0; + intel_wakeref_t wakeref; - ret = chipset_val + graphics_val; + i915 = mchdev_get(); + if (!i915) + return 0; -out_unlock: - spin_unlock_irq(&mchdev_lock); + with_intel_runtime_pm(i915, wakeref) { + spin_lock_irq(&mchdev_lock); + chipset_val = __i915_chipset_val(i915); + graphics_val = __i915_gfx_val(i915); + spin_unlock_irq(&mchdev_lock); + } - return ret; + drm_dev_put(&i915->drm); + return chipset_val + graphics_val; } EXPORT_SYMBOL_GPL(i915_read_mch_val); @@ -8033,23 +8049,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val); */ bool i915_gpu_raise(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; + struct drm_i915_private *i915; - if (dev_priv->ips.max_delay > dev_priv->ips.fmax) - dev_priv->ips.max_delay--; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + if (i915->ips.max_delay > i915->ips.fmax) + 
i915->ips.max_delay--; spin_unlock_irq(&mchdev_lock); - return ret; + drm_dev_put(&i915->drm); + return true; } EXPORT_SYMBOL_GPL(i915_gpu_raise); @@ -8061,23 +8073,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise); */ bool i915_gpu_lower(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; + struct drm_i915_private *i915; - if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) - dev_priv->ips.max_delay++; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + if (i915->ips.max_delay < i915->ips.min_delay) + i915->ips.max_delay++; spin_unlock_irq(&mchdev_lock); - return ret; + drm_dev_put(&i915->drm); + return true; } EXPORT_SYMBOL_GPL(i915_gpu_lower); @@ -8088,13 +8096,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); */ bool i915_gpu_busy(void) { - bool ret = false; + struct drm_i915_private *i915; + bool ret; - spin_lock_irq(&mchdev_lock); - if (i915_mch_dev) - ret = i915_mch_dev->gt.awake; - spin_unlock_irq(&mchdev_lock); + i915 = mchdev_get(); + if (!i915) + return false; + ret = i915->gt.awake; + + drm_dev_put(&i915->drm); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_busy); @@ -8107,24 +8118,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy); */ bool i915_gpu_turbo_disable(void) { - struct drm_i915_private *dev_priv; - bool ret = true; - - spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) { - ret = false; - goto out_unlock; - } - dev_priv = i915_mch_dev; - - dev_priv->ips.max_delay = dev_priv->ips.fstart; + struct drm_i915_private *i915; + bool ret; - if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) - ret = false; + i915 = mchdev_get(); + if (!i915) + return false; -out_unlock: + spin_lock_irq(&mchdev_lock); + i915->ips.max_delay = i915->ips.fstart; + ret = ironlake_set_drps(i915, i915->ips.fstart); spin_unlock_irq(&mchdev_lock); + drm_dev_put(&i915->drm); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); @@ -8153,18 +8159,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv) { /* We only register the i915 ips part with intel-ips once everything is * set up, to avoid intel-ips sneaking in and reading bogus values. 
*/ - spin_lock_irq(&mchdev_lock); - i915_mch_dev = dev_priv; - spin_unlock_irq(&mchdev_lock); + rcu_assign_pointer(i915_mch_dev, dev_priv); ips_ping_for_i915_load(); } void intel_gpu_ips_teardown(void) { - spin_lock_irq(&mchdev_lock); - i915_mch_dev = NULL; - spin_unlock_irq(&mchdev_lock); + rcu_assign_pointer(i915_mch_dev, NULL); } static void intel_init_emon(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From d8af327087f753d7cc26d813e8e5a83461d8e5c6 Mon Sep 17 00:00:00 2001 From: Juha-Pekka Heikkila Date: Thu, 20 Dec 2018 13:26:08 +0200 Subject: drm/i915: Fix ILK-IVB primary plane enable delays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Primary and sprite plane enable on ILK-IVB may take two frames to complete Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103925 Signed-off-by: Juha-Pekka Heikkila Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/1545305168-6047-1-git-send-email-juhapekka.heikkila@gmail.com --- drivers/gpu/drm/i915/intel_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7c974cf064fd..accb3081dee1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10851,8 +10851,11 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * Despite the w/a only being listed for IVB we assume that * the ILK/SNB note has similar ramifications, hence we apply * the w/a on all three platforms. + * + * With experimental results seems this is needed also for primary + * plane, not only sprite plane. */ - if (plane->id == PLANE_SPRITE0 && + if (plane->id != PLANE_CURSOR && (IS_GEN_RANGE(dev_priv, 5, 6) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!needs_scaling(old_plane_state) && -- cgit v1.2.3 From 305dc3f9834c9df40b3f6d4a6980447fb503cfc1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 21:59:56 +0000 Subject: drm/i915: Differentiate between ggtt->mutex and ppgtt->mutex We have two classes of VM, global GTT and per-process GTT. In order to allow ourselves the freedom to mix both along call chains, distinguish the two classes with regards to their mutex and lockdep maps. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190114215956.32266-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +++++----- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 ++ drivers/gpu/drm/i915/selftests/mock_gtt.c | 6 +++--- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dbea14bf67cc..74e6d02dcbbf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -473,8 +473,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) spin_unlock(&vm->free_pages.lock); } -static void i915_address_space_init(struct i915_address_space *vm, - struct drm_i915_private *dev_priv) +static void i915_address_space_init(struct i915_address_space *vm, int subclass) { /* * The vm->mutex must be reclaim safe (for use in the shrinker). @@ -482,6 +481,7 @@ static void i915_address_space_init(struct i915_address_space *vm, * attempt holding the lock is immediately reported by lockdep. 
*/ mutex_init(&vm->mutex); + lockdep_set_subclass(&vm->mutex, subclass); i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); GEM_BUG_ON(!vm->total); @@ -1547,7 +1547,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) /* From bdw, there is support for read-only pages in the PPGTT. */ ppgtt->vm.has_read_only = true; - i915_address_space_init(&ppgtt->vm, i915); + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. @@ -1996,7 +1996,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; - i915_address_space_init(&ppgtt->base.vm, i915); + i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT); ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; @@ -3433,7 +3433,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) * and beyond the end of the GTT if we do not provide a guard. */ mutex_lock(&dev_priv->drm.struct_mutex); - i915_address_space_init(&ggtt->vm, dev_priv); + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); ggtt->vm.is_ggtt = true; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index e2360f16427a..9229b03d629b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -288,6 +288,8 @@ struct i915_address_space { bool closed; struct mutex mutex; /* protects vma and our lists */ +#define VM_CLASS_GGTT 0 +#define VM_CLASS_PPGTT 1 u64 scratch_pte; struct i915_page_dma scratch_page; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 6ae418c76015..976c862b3842 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915, ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); - i915_address_space_init(&ppgtt->vm, i915); + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.clear_range = nop_clear_range; ppgtt->vm.insert_page = mock_insert_page; @@ -102,6 +102,7 @@ void mock_init_ggtt(struct drm_i915_private *i915) struct i915_ggtt *ggtt = &i915->ggtt; ggtt->vm.i915 = i915; + ggtt->vm.is_ggtt = true; ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); ggtt->mappable_end = resource_size(&ggtt->gmadr); @@ -117,9 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - i915_address_space_init(&ggtt->vm, i915); - ggtt->vm.is_ggtt = true; + i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); } void mock_fini_ggtt(struct drm_i915_private *i915) -- cgit v1.2.3 From 8cd999181f8c744c87fb64e7b3600876ec3428b2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 21:17:27 +0000 Subject: drm/i915: Prevent concurrent GGTT update and use on Braswell (again) On Braswell, under heavy stress, if we update the GGTT while simultaneously accessing another region inside the GTT, we are returned the wrong values. To prevent this we stop the machine to update the GGTT entries so that no memory traffic can occur at the same time. 
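Concretely that means routing the GGTT insert vfuncs through stop_machine(), the same trick the VT-d workaround already uses on BXT: every other CPU is parked while the PTEs are rewritten. A rough sketch of the shape of such a wrapper (the argument struct and names are illustrative; the patch below simply reroutes the existing __BKL helpers on Cherryview):

	struct insert_entries {
		struct i915_address_space *vm;
		struct i915_vma *vma;
		enum i915_cache_level level;
		u32 flags;
	};

	static int ggtt_insert_entries__cb(void *_arg)
	{
		struct insert_entries *arg = _arg;

		gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
		return 0;
	}

	static void ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
	{
		struct insert_entries arg = { vm, vma, level, flags };

		/* Serialise against all other CPUs while the PTEs change. */
		stop_machine(ggtt_insert_entries__cb, &arg, NULL);
	}
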
This was first spotted in commit 5bab6f60cb4d1417ad7c599166bcfec87529c1a2 Author: Chris Wilson Date: Fri Oct 23 18:43:32 2015 +0100 drm/i915: Serialise updates to GGTT with access through GGTT on Braswell but removed again in forlorn hope with commit 4509276ee824bb967885c095c610767e42345c36 Author: Chris Wilson Date: Mon Feb 20 12:47:18 2017 +0000 drm/i915: Remove Braswell GGTT update w/a However, gem_concurrent_blit is once again only stable with the patch applied and CI is detecting the odd failure in forked gem_mmap_gtt tests (which smell like the same issue). Fwiw, a wide variety of CPU memory barriers (around GGTT flushing, fence updates, PTE updates) and GPU flushes/invalidates (between requests, after PTE updates) were tried as part of the investigation to find an alternate cause, nothing comes close to serialised GGTT updates. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105591 Testcase: igt/gem_concurrent_blit Testcase: igt/gem_mmap_gtt/*forked* References: 5bab6f60cb4d ("drm/i915: Serialise updates to GGTT with access through GGTT on Braswell") References: 4509276ee824 ("drm/i915: Remove Braswell GGTT update w/a") Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190114211729.30352-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 74e6d02dcbbf..d24628f184e4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3232,7 +3232,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_entries = gen8_ggtt_insert_entries; /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ - if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { + if (intel_ggtt_update_needs_vtd_wa(dev_priv) || + IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) { ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; if (ggtt->vm.clear_range != nop_clear_range) -- cgit v1.2.3 From b14c06ec024947eaa35212f2380e90233d5092e0 Mon Sep 17 00:00:00 2001 From: Aditya Swarup Date: Thu, 10 Jan 2019 15:08:44 -0800 Subject: drm/i915/cnl: Fix CNL macros for Voltage Swing programming CNL macros for register groups CNL_PORT_TX_DW2_* / CNL_PORT_TX_DW5_* are configured incorrectly wrt definition of _CNL_PORT_TX_DW_GRP. 
v2: Jani suggested to keep the macros organized semantically i.e., by function, secondarily by port/pipe/transcoder.->(dw, port) Fixes: 4e53840fdfdd ("drm/i915/icl: Introduce new macros to get combophy registers") Cc: Clint Taylor Cc: Imre Deak Cc: Jani Nikula Signed-off-by: Aditya Swarup Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190110230844.9213-1-aditya.swarup@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 44958d994bfa..fad5a9e8b44d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1814,7 +1814,7 @@ enum i915_power_well_id { #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 -#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ _CNL_PORT_TX_B_GRP_OFFSET, \ @@ -1822,7 +1822,7 @@ enum i915_power_well_id { _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_F_GRP_OFFSET) + \ 4 * (dw)) -#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ +#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ @@ -1858,9 +1858,9 @@ enum i915_power_well_id { #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 -#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) -#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ +#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) +#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) +#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) @@ -1888,8 +1888,8 @@ enum i915_power_well_id { #define RTERM_SELECT(x) ((x) << 3) #define RTERM_SELECT_MASK (0x7 << 3) -#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) -#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) +#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) +#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) -- cgit v1.2.3 From fed85691b4083308792a862c50f9492c8f19433e Mon Sep 17 00:00:00 2001 From: Radhakrishna Sripada Date: Wed, 9 Jan 2019 13:14:14 -0800 Subject: drm/i915: Fix the static code analysis warning in debugfs intel_dp->dsc_dpcd is defined as an array making the if check redundant. 
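An array member decays to the address of storage embedded in the containing struct, which can never be NULL, so the condition is always true and static analysis rightly flags it. A tiny stand-alone illustration (hypothetical struct, not the real intel_dp):

	#include <stdio.h>
	#include <stdint.h>

	struct fake_dp {
		uint8_t dsc_dpcd[16];	/* array member, not a pointer */
	};

	int main(void)
	{
		struct fake_dp dp = { { 0 } };

		/*
		 * The array decays to a non-NULL pointer, so this branch is
		 * always taken; compilers typically warn about it too. This
		 * is exactly the redundant check the patch removes.
		 */
		if (dp.dsc_dpcd)
			printf("always reached\n");

		return 0;
	}
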
Fixes: e845f099f1c6 ("drm/i915/dsc: Add Per connector debugfs node for DSC support/enable") Cc: Rodrigo Vivi Reported-by: Nathan Chancellor Signed-off-by: Radhakrishna Sripada Reviewed-by: Manasi Navare Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190109211414.15622-1-radhakrishna.sripada@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 37c9aff234e0..24e2d52efa8e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4973,9 +4973,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", yesno(crtc_state->dsc_params.compression_enable)); - if (intel_dp->dsc_dpcd) - seq_printf(m, "DSC_Sink_Support: %s\n", - yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); + seq_printf(m, "DSC_Sink_Support: %s\n", + yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); if (!intel_dp_is_edp(intel_dp)) seq_printf(m, "FEC_Sink_Support: %s\n", yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable))); -- cgit v1.2.3 From 6d2438c8233bd06f24014a1fef17a7ac1c1777fe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 15 Jan 2019 10:25:05 +0000 Subject: drm/i915/perf: Annotate i915_perf.wakeref for keneldoc drivers/gpu/drm/i915/i915_drv.h:1375: warning: Function parameter or member 'wakeref' not described in 'i915_perf_stream' Reported-by: kbuild-all@01.org Fixes: 6619c0075f78 ("drm/i915/perf: Track the rpm wakeref") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190115102505.4843-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fa99824f63b3..956c1c86f90d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1336,6 +1336,10 @@ struct i915_perf_stream { */ struct list_head link; + /** + * @wakeref: As we keep the device awake while the perf stream is + * active, we track our runtime pm reference for later release. + */ intel_wakeref_t wakeref; /** -- cgit v1.2.3 From decd29e6b5fe08e20fade72100e2d4a85dc5f766 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 15 Jan 2019 12:20:57 +0000 Subject: drm/i915: Only dump GPU state on set-wedged if interesting As we may frequently mark the device as wedged to flush requests off it during the normal course of events, quite often we have a large state dump that is of no interest. Don't bother dumping it all if the engines are all idle. 
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190115122057.1677-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 61037e7292ee..d35dd3d6d3b6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3178,7 +3178,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) GEM_TRACE("start\n"); - if (GEM_SHOW_DEBUG()) { + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { struct drm_printer p = drm_debug_printer(__func__); for_each_engine(engine, i915, id) -- cgit v1.2.3 From e9d49bb718f394faab07498ca5f14d7f8414b1da Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 20 Dec 2018 15:26:02 +0200 Subject: drm/i915/ddi: Move DDI port detection to the corresponding helper We have already a function to detect DDI ports using VBT, so instead of opencoding the DDI specific version of this, move the opencoded part to the existing helper. Cc: Jani Nikula Cc: Mika Kahola Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20181220132604.25222-1-imre.deak@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_bios.c | 9 +++++++++ drivers/gpu/drm/i915/intel_display.c | 4 +--- 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 140c218128cb..561a4f9f044c 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1946,6 +1946,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por }; int i; + if (HAS_DDI(dev_priv)) { + const struct ddi_vbt_port_info *port_info = + &dev_priv->vbt.ddi_port_info[port]; + + return port_info->supports_dp || + port_info->supports_dvi || + port_info->supports_hdmi; + } + /* FIXME maybe deal with port A as well? */ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) return false; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index accb3081dee1..3d1e1e652d72 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14362,9 +14362,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * On SKL we don't have a way to detect DDI-E so we rely on VBT. */ if (IS_GEN9_BC(dev_priv) && - (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || - dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || - dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) + intel_bios_is_port_present(dev_priv, PORT_E)) intel_ddi_init(dev_priv, PORT_E); } else if (HAS_PCH_SPLIT(dev_priv)) { -- cgit v1.2.3 From 3f2e9ed0b26d65de6d4673233671537ed7fc6c98 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 20 Dec 2018 15:26:03 +0200 Subject: drm/i915/icl: Detect port F presence via VBT Registering an output for a non-existent port (on a given SKU) can lead to problems when trying to use the port, for instance timeouts during power well enabling. Since there are no strap bits for port detection we have to rely on VBT for this, so do that here. There are no known SKUs where any of the A-E ports are non-existent, so to reduce the likelihood of breakage due to incorrect VBT information, do this detection only for port F (which is known to be missing on some ICL SKUs). 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108915 Cc: Mika Kahola Cc: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20181220132604.25222-2-imre.deak@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_display.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3d1e1e652d72..ce1cdd2c0c3d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14320,7 +14320,13 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_C); intel_ddi_init(dev_priv, PORT_D); intel_ddi_init(dev_priv, PORT_E); - intel_ddi_init(dev_priv, PORT_F); + /* + * On some ICL SKUs port F is not present. No strap bits for + * this, so rely on VBT. + */ + if (intel_bios_is_port_present(dev_priv, PORT_F)) + intel_ddi_init(dev_priv, PORT_F); + icl_dsi_init(dev_priv); } else if (IS_GEN9_LP(dev_priv)) { /* -- cgit v1.2.3 From 484d9a844d0d0aeaa4cd3cec20885b7de9986a55 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 15 Jan 2019 12:44:42 +0000 Subject: drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start Since commit 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers") we have been able to report failure from mmu_invalidate_range_start which allows us to use a trylock on the struct_mutex to avoid potential recursion and report -EBUSY instead. Furthermore, this allows us to pull the work into the main callback and avoid the sleight-of-hand in using a workqueue to avoid lockdep. However, not all paths to mmu_invalidate_range_start are prepared to handle failure, so instead of reporting the recursion, deal with it by propagating the failure upwards, who can decide themselves to handle it or report it. v2: Mark up the recursive lock behaviour and comment on the various weak points. v3: Follow commit 3824e41975ae ("drm/i915: Use mutex_lock_killable() from inside the shrinker") and also use mutex_lock_killable(). v3.1: No leak on EINTR. 
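The resulting locking shape can be reduced to a userspace sketch (a pthread analogue with made-up names; it omits the recursion detection, the killable wait and the per-object bookkeeping of the real patch): take the lock opportunistically, and if the notifier is not allowed to block, hand the failure back to the caller instead of sleeping.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

static int invalidate_range_start(bool blockable)
{
	/* Opportunistic trylock; the real code also detects recursion here. */
	if (pthread_mutex_trylock(&struct_mutex)) {
		if (!blockable)
			return -EAGAIN;	/* caller may retry later */
		pthread_mutex_lock(&struct_mutex);
	}

	/* ... unbind the overlapping objects and put their pages ... */

	pthread_mutex_unlock(&struct_mutex);
	return 0;
}

int main(void)
{
	return invalidate_range_start(true) ? 1 : 0;
}
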
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108375 References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190115124442.3500-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_gem.c | 30 +++-- drivers/gpu/drm/i915/i915_gem_object.h | 7 + drivers/gpu/drm/i915/i915_gem_userptr.c | 224 +++++++++++++++----------------- 4 files changed, 139 insertions(+), 126 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 956c1c86f90d..da055a86db4d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2935,8 +2935,8 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ }; -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass); +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass); void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); enum i915_map_type { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d35dd3d6d3b6..565b2fa1607d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2303,8 +2303,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) struct sg_table *pages; pages = fetch_and_zero(&obj->mm.pages); - if (!pages) - return NULL; + if (IS_ERR_OR_NULL(pages)) + return pages; spin_lock(&i915->mm.obj_lock); list_del(&obj->mm.link); @@ -2328,22 +2328,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) return pages; } -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass) +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass) { struct sg_table *pages; + int ret; if (i915_gem_object_has_pinned_pages(obj)) - return; + return -EBUSY; GEM_BUG_ON(obj->bind_count); - if (!i915_gem_object_has_pages(obj)) - return; /* May be called by shrinker from within get_pages() (on another bo) */ mutex_lock_nested(&obj->mm.lock, subclass); - if (unlikely(atomic_read(&obj->mm.pages_pin_count))) + if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { + ret = -EBUSY; goto unlock; + } /* * ->put_pages might need to allocate memory for the bit17 swizzle @@ -2351,11 +2352,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, * lists early. */ pages = __i915_gem_object_unset_pages(obj); + + /* + * XXX Temporary hijinx to avoid updating all backends to handle + * NULL pages. In the future, when we have more asynchronous + * get_pages backends we should be better able to handle the + * cancellation of the async task in a more uniform manner. 
+ */ + if (!pages && !i915_gem_object_needs_async_cancel(obj)) + pages = ERR_PTR(-EINVAL); + if (!IS_ERR(pages)) obj->ops->put_pages(obj, pages); + ret = 0; unlock: mutex_unlock(&obj->mm.lock); + + return ret; } bool i915_sg_trim(struct sg_table *orig_st) diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index ff3da64470dd..cb1b0144d274 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -57,6 +57,7 @@ struct drm_i915_gem_object_ops { #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) #define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) #define I915_GEM_OBJECT_IS_PROXY BIT(2) +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3) /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set @@ -387,6 +388,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY; } +static inline bool +i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL; +} + static inline bool i915_gem_object_is_active(const struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 1fb6a7bb5054..38e19a42e0f4 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -49,77 +49,67 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root_cached objects; - struct workqueue_struct *wq; + struct i915_mm_struct *mm; }; struct i915_mmu_object { struct i915_mmu_notifier *mn; struct drm_i915_gem_object *obj; struct interval_tree_node it; - struct list_head link; - struct work_struct work; - bool attached; }; -static void cancel_userptr(struct work_struct *work) +static void add_object(struct i915_mmu_object *mo) { - struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); - struct drm_i915_gem_object *obj = mo->obj; - struct work_struct *active; - - /* Cancel any active worker and force us to re-evaluate gup */ - mutex_lock(&obj->mm.lock); - active = fetch_and_zero(&obj->userptr.work); - mutex_unlock(&obj->mm.lock); - if (active) - goto out; - - i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL); - - mutex_lock(&obj->base.dev->struct_mutex); - - /* We are inside a kthread context and can't be interrupted */ - if (i915_gem_object_unbind(obj) == 0) - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - WARN_ONCE(i915_gem_object_has_pages(obj), - "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n", - obj->bind_count, - atomic_read(&obj->mm.pages_pin_count), - obj->pin_global); - - mutex_unlock(&obj->base.dev->struct_mutex); - -out: - i915_gem_object_put(obj); + GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb)); + interval_tree_insert(&mo->it, &mo->mn->objects); } -static void add_object(struct i915_mmu_object *mo) +static void del_object(struct i915_mmu_object *mo) { - if (mo->attached) + if (RB_EMPTY_NODE(&mo->it.rb)) return; - interval_tree_insert(&mo->it, &mo->mn->objects); - mo->attached = true; + interval_tree_remove(&mo->it, &mo->mn->objects); + RB_CLEAR_NODE(&mo->it.rb); } -static void del_object(struct i915_mmu_object *mo) +static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) { - if (!mo->attached) + struct i915_mmu_object *mo = obj->userptr.mmu_object; + + /* + * During mm_invalidate_range we need to cancel any userptr 
that + * overlaps the range being invalidated. Doing so requires the + * struct_mutex, and that risks recursion. In order to cause + * recursion, the user must alias the userptr address space with + * a GTT mmapping (possible with a MAP_FIXED) - then when we have + * to invalidate that mmaping, mm_invalidate_range is called with + * the userptr address *and* the struct_mutex held. To prevent that + * we set a flag under the i915_mmu_notifier spinlock to indicate + * whether this object is valid. + */ + if (!mo) return; - interval_tree_remove(&mo->it, &mo->mn->objects); - mo->attached = false; + spin_lock(&mo->mn->lock); + if (value) + add_object(mo); + else + del_object(mo); + spin_unlock(&mo->mn->lock); } -static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, - const struct mmu_notifier_range *range) +static int +userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, + const struct mmu_notifier_range *range) { struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); - struct i915_mmu_object *mo; struct interval_tree_node *it; - LIST_HEAD(cancelled); + struct mutex *unlock = NULL; unsigned long end; + int ret = 0; if (RB_EMPTY_ROOT(&mn->objects.rb_root)) return 0; @@ -130,11 +120,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, spin_lock(&mn->lock); it = interval_tree_iter_first(&mn->objects, range->start, end); while (it) { + struct drm_i915_gem_object *obj; + if (!range->blockable) { - spin_unlock(&mn->lock); - return -EAGAIN; + ret = -EAGAIN; + break; } - /* The mmu_object is released late when destroying the + + /* + * The mmu_object is released late when destroying the * GEM object so it is entirely possible to gain a * reference on an object in the process of being freed * since our serialisation is via the spinlock and not @@ -143,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, * use-after-free we only acquire a reference on the * object if it is not in the process of being destroyed. */ - mo = container_of(it, struct i915_mmu_object, it); - if (kref_get_unless_zero(&mo->obj->base.refcount)) - queue_work(mn->wq, &mo->work); + obj = container_of(it, struct i915_mmu_object, it)->obj; + if (!kref_get_unless_zero(&obj->base.refcount)) { + it = interval_tree_iter_next(it, range->start, end); + continue; + } + spin_unlock(&mn->lock); + + if (!unlock) { + unlock = &mn->mm->i915->drm.struct_mutex; + + switch (mutex_trylock_recursive(unlock)) { + default: + case MUTEX_TRYLOCK_FAILED: + if (!mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { + i915_gem_object_put(obj); + return -EINTR; + } + /* fall through */ + case MUTEX_TRYLOCK_SUCCESS: + break; + + case MUTEX_TRYLOCK_RECURSIVE: + unlock = ERR_PTR(-EEXIST); + break; + } + } + + ret = i915_gem_object_unbind(obj); + if (ret == 0) + ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + i915_gem_object_put(obj); + if (ret) + goto unlock; - list_add(&mo->link, &cancelled); - it = interval_tree_iter_next(it, range->start, end); + spin_lock(&mn->lock); + + /* + * As we do not (yet) protect the mmu from concurrent insertion + * over this range, there is no guarantee that this search will + * terminate given a pathologic workload. 
+ */ + it = interval_tree_iter_first(&mn->objects, range->start, end); } - list_for_each_entry(mo, &cancelled, link) - del_object(mo); spin_unlock(&mn->lock); - if (!list_empty(&cancelled)) - flush_workqueue(mn->wq); +unlock: + if (!IS_ERR_OR_NULL(unlock)) + mutex_unlock(unlock); + + return ret; - return 0; } static const struct mmu_notifier_ops i915_gem_userptr_notifier = { - .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start, + .invalidate_range_start = userptr_mn_invalidate_range_start, }; static struct i915_mmu_notifier * -i915_mmu_notifier_create(struct mm_struct *mm) +i915_mmu_notifier_create(struct i915_mm_struct *mm) { struct i915_mmu_notifier *mn; @@ -176,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; mn->objects = RB_ROOT_CACHED; - mn->wq = alloc_workqueue("i915-userptr-release", - WQ_UNBOUND | WQ_MEM_RECLAIM, - 0); - if (mn->wq == NULL) { - kfree(mn); - return ERR_PTR(-ENOMEM); - } + mn->mm = mm; return mn; } @@ -192,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { struct i915_mmu_object *mo; - mo = obj->userptr.mmu_object; - if (mo == NULL) + mo = fetch_and_zero(&obj->userptr.mmu_object); + if (!mo) return; spin_lock(&mo->mn->lock); del_object(mo); spin_unlock(&mo->mn->lock); kfree(mo); - - obj->userptr.mmu_object = NULL; } static struct i915_mmu_notifier * @@ -214,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) if (mn) return mn; - mn = i915_mmu_notifier_create(mm->mm); + mn = i915_mmu_notifier_create(mm); if (IS_ERR(mn)) err = PTR_ERR(mn); @@ -237,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) mutex_unlock(&mm->i915->mm_lock); up_write(&mm->mm->mmap_sem); - if (mn && !IS_ERR(mn)) { - destroy_workqueue(mn->wq); + if (mn && !IS_ERR(mn)) kfree(mn); - } return err ? ERR_PTR(err) : mm->mn; } @@ -263,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return PTR_ERR(mn); mo = kzalloc(sizeof(*mo), GFP_KERNEL); - if (mo == NULL) + if (!mo) return -ENOMEM; mo->mn = mn; mo->obj = obj; mo->it.start = obj->userptr.ptr; mo->it.last = obj->userptr.ptr + obj->base.size - 1; - INIT_WORK(&mo->work, cancel_userptr); + RB_CLEAR_NODE(&mo->it.rb); obj->userptr.mmu_object = mo; return 0; @@ -284,12 +304,16 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn, return; mmu_notifier_unregister(&mn->mn, mm); - destroy_workqueue(mn->wq); kfree(mn); } #else +static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) +{ +} + static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { @@ -458,42 +482,6 @@ alloc_table: return st; } -static int -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, - bool value) -{ - int ret = 0; - - /* During mm_invalidate_range we need to cancel any userptr that - * overlaps the range being invalidated. Doing so requires the - * struct_mutex, and that risks recursion. In order to cause - * recursion, the user must alias the userptr address space with - * a GTT mmapping (possible with a MAP_FIXED) - then when we have - * to invalidate that mmaping, mm_invalidate_range is called with - * the userptr address *and* the struct_mutex held. To prevent that - * we set a flag under the i915_mmu_notifier spinlock to indicate - * whether this object is valid. 
- */ -#if defined(CONFIG_MMU_NOTIFIER) - if (obj->userptr.mmu_object == NULL) - return 0; - - spin_lock(&obj->userptr.mmu_object->mn->lock); - /* In order to serialise get_pages with an outstanding - * cancel_userptr, we must drop the struct_mutex and try again. - */ - if (!value) - del_object(obj->userptr.mmu_object); - else if (!work_pending(&obj->userptr.mmu_object->work)) - add_object(obj->userptr.mmu_object); - else - ret = -EAGAIN; - spin_unlock(&obj->userptr.mmu_object->mn->lock); -#endif - - return ret; -} - static void __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { @@ -679,8 +667,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, struct sgt_iter sgt_iter; struct page *page; - BUG_ON(obj->userptr.work != NULL); + /* Cancel any inflight work and force them to restart their gup */ + obj->userptr.work = NULL; __i915_gem_userptr_set_active(obj, false); + if (!pages) + return; if (obj->mm.madv != I915_MADV_WILLNEED) obj->mm.dirty = false; @@ -718,7 +709,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, + I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_ASYNC_CANCEL, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, .dmabuf_export = i915_gem_userptr_dmabuf_export, -- cgit v1.2.3 From 0212bdef5a4de344a179f9c267899df5bb268ca7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 15 Jan 2019 21:29:48 +0000 Subject: drm/i915: Move intel_execlists_show_requests() aside Move the debug pretty printer into a standalone routine prior to extending it in upcoming feature work. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190115212948.10423-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_engine_cs.c | 55 +++----------------------------- drivers/gpu/drm/i915/intel_lrc.c | 58 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_lrc.h | 10 +++++- 3 files changed, 71 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 45e33eee76f9..200218cb157f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1422,15 +1422,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...) 
{ - const int MAX_REQUESTS_TO_SHOW = 8; struct intel_breadcrumbs * const b = &engine->breadcrumbs; - const struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_gpu_error * const error = &engine->i915->gpu_error; - struct i915_request *rq, *last; + struct i915_request *rq; intel_wakeref_t wakeref; unsigned long flags; struct rb_node *rb; - int count; if (header) { va_list ap; @@ -1494,52 +1491,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } - local_irq_save(flags); - spin_lock(&engine->timeline.lock); - - last = NULL; - count = 0; - list_for_each_entry(rq, &engine->timeline.requests, link) { - if (count++ < MAX_REQUESTS_TO_SHOW - 1) - print_request(m, rq, "\t\tE "); - else - last = rq; - } - if (last) { - if (count > MAX_REQUESTS_TO_SHOW) { - drm_printf(m, - "\t\t...skipping %d executing requests...\n", - count - MAX_REQUESTS_TO_SHOW); - } - print_request(m, last, "\t\tE "); - } - - last = NULL; - count = 0; - drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); - for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); - int i; - - priolist_for_each_request(rq, p, i) { - if (count++ < MAX_REQUESTS_TO_SHOW - 1) - print_request(m, rq, "\t\tQ "); - else - last = rq; - } - } - if (last) { - if (count > MAX_REQUESTS_TO_SHOW) { - drm_printf(m, - "\t\t...skipping %d queued requests...\n", - count - MAX_REQUESTS_TO_SHOW); - } - print_request(m, last, "\t\tQ "); - } - - spin_unlock(&engine->timeline.lock); + intel_execlists_show_requests(engine, m, print_request, 8); - spin_lock(&b->rb_lock); + spin_lock_irqsave(&b->rb_lock, flags); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { struct intel_wait *w = rb_entry(rb, typeof(*w), node); @@ -1548,8 +1502,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, task_state_to_char(w->tsk), w->seqno); } - spin_unlock(&b->rb_lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&b->rb_lock, flags); drm_printf(m, "HWSP:\n"); hexdump(m, engine->status_page.page_addr, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dcb11c5f8230..a62ad80fdf97 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2702,6 +2702,64 @@ void intel_lr_context_resume(struct drm_i915_private *i915) } } +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct i915_request *rq, + const char *prefix), + unsigned int max) +{ + const struct intel_engine_execlists *execlists = &engine->execlists; + struct i915_request *rq, *last; + unsigned long flags; + unsigned int count; + struct rb_node *rb; + + spin_lock_irqsave(&engine->timeline.lock, flags); + + last = NULL; + count = 0; + list_for_each_entry(rq, &engine->timeline.requests, link) { + if (count++ < max - 1) + show_request(m, rq, "\t\tE "); + else + last = rq; + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d executing requests...\n", + count - max); + } + show_request(m, last, "\t\tE "); + } + + last = NULL; + count = 0; + drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + int i; + + priolist_for_each_request(rq, p, i) { + if (count++ < max - 1) + show_request(m, rq, "\t\tQ 
"); + else + last = rq; + } + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d queued requests...\n", + count - max); + } + show_request(m, last, "\t\tQ "); + } + + spin_unlock_irqrestore(&engine->timeline.lock, flags); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/intel_lrc.c" #endif diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index f5a5502ecf70..3d86c27c6b32 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -97,11 +97,19 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); */ #define LRC_HEADER_PAGES LRC_PPHWSP_PN +struct drm_printer; + struct drm_i915_private; struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); - void intel_execlists_set_default_submission(struct intel_engine_cs *engine); +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + struct i915_request *rq, + const char *prefix), + unsigned int max); + #endif /* _INTEL_LRC_H_ */ -- cgit v1.2.3 From 9e267d286af5c5a67995128df40ca3d1f93277a6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 15 Jan 2019 22:11:18 +0000 Subject: drm/i915/userptr: Fix error handling of mutex_lock_killable() mutex_lock_killable() returns -EINTR on failure, not the anticipate bool return like trylock. (Oh no, not again.) Fixes: 484d9a844d0d ("drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190115221118.13304-1-chris@chris-wilson.co.uk Reviewed-by: Matthew Auld --- drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 38e19a42e0f4..1d3f9a31ad61 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -150,7 +150,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, switch (mutex_trylock_recursive(unlock)) { default: case MUTEX_TRYLOCK_FAILED: - if (!mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { + if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { i915_gem_object_put(obj); return -EINTR; } -- cgit v1.2.3 From 204474a6b859ff2367252d8312ac65d3245823bf Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Tue, 15 Jan 2019 15:08:00 -0500 Subject: drm/i915: Pass down rc in intel_encoder->compute_config() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Something that I completely missed when implementing the new MST VCPI atomic helpers is that with those helpers, there's technically a chance of us having to grab additional modeset locks in ->compute_config() and furthermore, that means we have the potential to hit a normal modeset deadlock. However, because ->compute_config() only returns a bool this means we can't return -EDEADLK when we need to drop locks and try again which means we end up just failing the atomic check permanently. Whoops. So, fix this by modifying ->compute_config() to pass down an actual error code instead of a bool so that the atomic check can be restarted on modeset deadlocks. Thanks to Ville Syrjälä for pointing this out! 
Changes since v1: * Add some newlines * Return only -EINVAL from hsw_crt_compute_config() * Propogate return code from intel_dp_compute_dsc_params() * Change all of the intel_dp_compute_link_config*() variants * Don't miss if (hdmi_port_clock_valid()) branch in intel_hdmi_compute_config() [Cherry-picked from drm-misc-next to drm-intel-next-queued to fix linux-next & drm-tip conflict, while waiting for proper propagation of the DP MST series that this commit fixes. In hindsight, a topic branch might have been a better approach for it.] Signed-off-by: Lyude Paul Cc: Ville Syrjälä Fixes: eceae1472467 ("drm/dp_mst: Start tracking per-port VCPI allocations") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109320 Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190115200800.3121-1-lyude@redhat.com (cherry picked from commit 96550555a78ca3c9fda4b358549a5622810fe32c) Signed-off-by: Jani Nikula Acked-by: Daniel Vetter --- drivers/gpu/drm/i915/icl_dsi.c | 8 ++-- drivers/gpu/drm/i915/intel_crt.c | 35 +++++++++--------- drivers/gpu/drm/i915/intel_ddi.c | 6 +-- drivers/gpu/drm/i915/intel_display.c | 11 ++++-- drivers/gpu/drm/i915/intel_dp.c | 71 +++++++++++++++++++----------------- drivers/gpu/drm/i915/intel_dp_mst.c | 12 +++--- drivers/gpu/drm/i915/intel_drv.h | 18 ++++----- drivers/gpu/drm/i915/intel_dvo.c | 11 +++--- drivers/gpu/drm/i915/intel_hdmi.c | 14 +++---- drivers/gpu/drm/i915/intel_lvds.c | 12 +++--- drivers/gpu/drm/i915/intel_sdvo.c | 14 +++---- drivers/gpu/drm/i915/intel_tv.c | 8 ++-- drivers/gpu/drm/i915/vlv_dsi.c | 14 +++---- 13 files changed, 122 insertions(+), 112 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index f3a5f03646ce..355b48d1c937 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -1188,9 +1188,9 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); } -static bool gen11_dsi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int gen11_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); @@ -1215,7 +1215,7 @@ static bool gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; - return true; + return 0; } static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 33bd2addcbdd..081c333f30d2 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -345,51 +345,52 @@ intel_crt_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_crt_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + + return 0; } -static bool pch_crt_compute_config(struct 
intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int pch_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + return 0; } -static bool hsw_crt_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int hsw_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; /* HSW/BDW FDI limited to 4k */ if (adjusted_mode->crtc_hdisplay > 4096 || adjusted_mode->crtc_hblank_start > 4096) - return false; + return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -398,7 +399,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, if (HAS_PCH_LPT(dev_priv)) { if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { DRM_DEBUG_KMS("LPT only supports 24bpp\n"); - return false; + return -EINVAL; } pipe_config->pipe_bpp = 24; @@ -407,7 +408,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, /* FDI must always be 2.7 GHz */ pipe_config->port_clock = 135000 * 2; - return true; + return 0; } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 7f3cd055de50..ce44744a5f9d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3837,9 +3837,9 @@ intel_ddi_compute_output_type(struct intel_encoder *encoder, } } -static bool intel_ddi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_ddi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ce1cdd2c0c3d..c6b3b69aaeac 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11553,10 +11553,13 @@ encoder_retry: continue; encoder = to_intel_encoder(connector_state->best_encoder); - - if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { - DRM_DEBUG_KMS("Encoder config failure\n"); - return -EINVAL; + ret = encoder->compute_config(encoder, pipe_config, + connector_state); + if (ret < 0) { + if (ret != -EDEADLK) + DRM_DEBUG_KMS("Encoder config failure: %d\n", + ret); + return ret; } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0a3ac98a779e..df4292bb1a4f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1819,7 +1819,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, } /* Optimize link config in order: max bpp, min clock, min lanes */ -static bool 
+static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct link_config_limits *limits) @@ -1845,17 +1845,17 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, pipe_config->pipe_bpp = bpp; pipe_config->port_clock = link_clock; - return true; + return 0; } } } } - return false; + return -EINVAL; } /* Optimize link config in order: max bpp, min lanes, min clock */ -static bool +static int intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct link_config_limits *limits) @@ -1881,13 +1881,13 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, pipe_config->pipe_bpp = bpp; pipe_config->port_clock = link_clock; - return true; + return 0; } } } } - return false; + return -EINVAL; } static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) @@ -1905,19 +1905,20 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) return 0; } -static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state, - struct link_config_limits *limits) +static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; u8 dsc_max_bpc; int pipe_bpp; + int ret; if (!intel_dp_supports_dsc(intel_dp, pipe_config)) - return false; + return -EINVAL; dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, conn_state->max_requested_bpc); @@ -1925,7 +1926,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); - return false; + return -EINVAL; } /* @@ -1959,7 +1960,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, adjusted_mode->crtc_hdisplay); if (!dsc_max_output_bpp || !dsc_dp_slice_count) { DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); - return false; + return -EINVAL; } pipe_config->dsc_params.compressed_bpp = min_t(u16, dsc_max_output_bpp >> 4, @@ -1976,16 +1977,19 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc_params.dsc_split = true; } else { DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); - return false; + return -EINVAL; } } - if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) { + + ret = intel_dp_compute_dsc_params(intel_dp, pipe_config); + if (ret < 0) { DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " "Compressed BPP = %d\n", pipe_config->pipe_bpp, pipe_config->dsc_params.compressed_bpp); - return false; + return ret; } + pipe_config->dsc_params.compression_enable = true; DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " "Compressed Bpp = %d Slice Count = %d\n", @@ -1993,10 +1997,10 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc_params.compressed_bpp, pipe_config->dsc_params.slice_count); - return true; + return 0; } -static bool +static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2005,7 
+2009,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct link_config_limits limits; int common_len; - bool ret; + int ret; common_len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); @@ -2063,10 +2067,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, /* enable compression if the mode doesn't fit available BW */ DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); - if (!ret || intel_dp->force_dsc_en) { - if (!intel_dp_dsc_compute_config(intel_dp, pipe_config, - conn_state, &limits)) - return false; + if (ret || intel_dp->force_dsc_en) { + ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, + conn_state, &limits); + if (ret < 0) + return ret; } if (pipe_config->dsc_params.compression_enable) { @@ -2091,10 +2096,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } - return true; + return 0; } -bool +int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2110,6 +2115,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, to_intel_digital_connector_state(conn_state); bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); + int ret; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) pipe_config->has_pch_encoder = true; @@ -2131,8 +2137,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (INTEL_GEN(dev_priv) >= 9) { - int ret; - ret = skl_update_scaler_crtc(pipe_config); if (ret) return ret; @@ -2147,20 +2151,21 @@ intel_dp_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; if (HAS_GMCH_DISPLAY(dev_priv) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - return false; + return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) - return false; + return -EINVAL; pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && intel_dp_supports_fec(intel_dp, pipe_config); - if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state)) - return false; + ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); + if (ret < 0) + return ret; if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* @@ -2208,7 +2213,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); - return true; + return 0; } void intel_dp_set_link_params(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index a8d43ad5352c..778c887108b7 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -29,9 +29,9 @@ #include #include -static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); @@ -48,7 +48,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, DP_DPCD_QUIRK_CONSTANT_N); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = 
INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; @@ -85,7 +85,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, if (slots < 0) { DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); - return false; + return slots; } } @@ -103,7 +103,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); - return true; + return 0; } static int intel_dp_mst_atomic_check(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5e5ceec7c004..9ecff07598d9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -225,9 +225,9 @@ struct intel_encoder { enum intel_output_type (*compute_output_type)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); - bool (*compute_config)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + int (*compute_config)(struct intel_encoder *, + struct intel_crtc_state *, + struct drm_connector_state *); void (*pre_pll_enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -1817,9 +1817,9 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_flush_work(struct drm_encoder *encoder); -bool intel_dp_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state); +int intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state); bool intel_dp_is_edp(struct intel_dp *intel_dp); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, @@ -1979,9 +1979,9 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); -bool intel_hdmi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state); +int intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state); bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index bc3c3cb57ec6..a6c82482a841 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -234,9 +234,9 @@ intel_dvo_mode_valid(struct drm_connector *connector, return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } -static bool intel_dvo_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_dvo *intel_dvo = enc_to_dvo(encoder); const struct drm_display_mode *fixed_mode = @@ -253,10 +253,11 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, 
intel_fixed_panel_mode(fixed_mode, adjusted_mode); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - return true; + + return 0; } static void intel_dvo_pre_enable(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 14727ac06f67..51ec81d41dca 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1708,9 +1708,9 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return true; } -bool intel_hdmi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +int intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1726,7 +1726,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; @@ -1757,7 +1757,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, &clock_12bpc, &clock_10bpc, &clock_8bpc)) { DRM_ERROR("Can't support YCBCR420 output\n"); - return false; + return -EINVAL; } } @@ -1807,7 +1807,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, false, force_dvi) != MODE_OK) { DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); - return false; + return -EINVAL; } /* Set user selected PAR to incoming mode's member */ @@ -1826,7 +1826,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, } } - return true; + return 0; } static void diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b01aacb5d73d..46a5dfd5cdf7 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -380,9 +380,9 @@ intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = @@ -396,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, /* Should never happen!! */ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) { DRM_ERROR("Can't support LVDS on pipe A\n"); - return false; + return -EINVAL; } if (lvds_encoder->a3_power == LVDS_A3_POWER_UP) @@ -422,7 +422,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, adjusted_mode); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; if (HAS_PCH_SPLIT(dev_priv)) { pipe_config->has_pch_encoder = true; @@ -441,7 +441,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, * user's requested refresh rate. 
*/ - return true; + return 0; } static enum drm_connector_status diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index b08fed11219f..ba58b06f730d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1107,9 +1107,9 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) pipe_config->clock_set = true; } -static bool intel_sdvo_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_sdvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_connector_state *intel_sdvo_state = @@ -1134,7 +1134,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, */ if (IS_TV(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) - return false; + return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, @@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, } else if (IS_LVDS(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo_connector->base.panel.fixed_mode)) - return false; + return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, @@ -1153,7 +1153,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; /* * Make the CRTC code factor in the SDVO pixel multiplier. The @@ -1193,7 +1193,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, if (intel_sdvo_connector->is_hdmi) adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; - return true; + return 0; } #define UPDATE_PROPERTY(input, NAME) \ diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d7a414ce2774..bd5536f0ec92 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -869,7 +869,7 @@ intel_tv_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } -static bool +static int intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -879,10 +879,10 @@ intel_tv_compute_config(struct intel_encoder *encoder, &pipe_config->base.adjusted_mode; if (!tv_mode) - return false; + return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; adjusted_mode->crtc_clock = tv_mode->clock; @@ -897,7 +897,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, * or whether userspace is doing something stupid. 
*/ - return true; + return 0; } static void diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index d116fead8514..c247ce74b71a 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -256,9 +256,9 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->sb_lock); } -static bool intel_dsi_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) +static int intel_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, @@ -284,7 +284,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) - return false; + return -EINVAL; /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; @@ -302,16 +302,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, ret = bxt_dsi_pll_compute(encoder, pipe_config); if (ret) - return false; + return -EINVAL; } else { ret = vlv_dsi_pll_compute(encoder, pipe_config); if (ret) - return false; + return -EINVAL; } pipe_config->clock_set = true; - return true; + return 0; } static bool glk_dsi_enable_io(struct intel_encoder *encoder) -- cgit v1.2.3 From 18bb2bccb5492fb5c36908191b8af77e54c58814 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 21:04:01 +0000 Subject: drm/i915: Serialise concurrent calls to i915_gem_set_wedged() Make i915_gem_set_wedged() and i915_gem_unset_wedged() behaviour more consistent if called concurrently, and only do the wedging and reporting once, curtailing any possible race where we start unwedging in the middle of a wedge. 
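As a rough userspace sketch of the serialisation being added (a pthread analogue with invented names, not the i915 code): both transitions take the same mutex and test the flag under it, so the wedge work runs at most once and can never interleave with an unwedge.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wedge_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool wedged;

static void set_wedged(void)
{
	pthread_mutex_lock(&wedge_mutex);
	if (!wedged) {
		/* ... dump state, stop submission, cancel requests ... */
		printf("wedging once\n");
		wedged = true;
	}
	pthread_mutex_unlock(&wedge_mutex);
}

static void unset_wedged(void)
{
	pthread_mutex_lock(&wedge_mutex);
	if (wedged) {
		/* ... wait for requests, then re-enable submission ... */
		wedged = false;
	}
	pthread_mutex_unlock(&wedge_mutex);
}

int main(void)
{
	set_wedged();
	set_wedged();	/* second call finds the flag set and is a no-op */
	unset_wedged();
	return 0;
}
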
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114210408.4561-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 32 ++++++++++++++++++------ drivers/gpu/drm/i915/i915_gpu_error.h | 4 ++- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1 + 3 files changed, 28 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 565b2fa1607d..5c6089777fde 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3187,10 +3187,15 @@ static void nop_submit_request(struct i915_request *request) void i915_gem_set_wedged(struct drm_i915_private *i915) { + struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; - GEM_TRACE("start\n"); + mutex_lock(&error->wedge_mutex); + if (test_bit(I915_WEDGED, &error->flags)) { + mutex_unlock(&error->wedge_mutex); + return; + } if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { struct drm_printer p = drm_debug_printer(__func__); @@ -3199,8 +3204,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) intel_engine_dump(engine, &p, "%s\n", engine->name); } - if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags)) - goto out; + GEM_TRACE("start\n"); /* * First, stop submission to hw, but do not yet complete requests by @@ -3236,23 +3240,31 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) intel_engine_wakeup(engine); } -out: + smp_mb__before_atomic(); + set_bit(I915_WEDGED, &error->flags); + GEM_TRACE("end\n"); + mutex_unlock(&error->wedge_mutex); - wake_up_all(&i915->gpu_error.reset_queue); + wake_up_all(&error->reset_queue); } bool i915_gem_unset_wedged(struct drm_i915_private *i915) { + struct i915_gpu_error *error = &i915->gpu_error; struct i915_timeline *tl; + bool ret = false; lockdep_assert_held(&i915->drm.struct_mutex); - if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) + + if (!test_bit(I915_WEDGED, &error->flags)) return true; if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ return false; + mutex_lock(&error->wedge_mutex); + GEM_TRACE("start\n"); /* @@ -3286,7 +3298,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) */ if (dma_fence_default_wait(&rq->fence, true, MAX_SCHEDULE_TIMEOUT) < 0) - return false; + goto unlock; } i915_retire_requests(i915); GEM_BUG_ON(i915->gt.active_requests); @@ -3309,8 +3321,11 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, &i915->gpu_error.flags); + ret = true; +unlock: + mutex_unlock(&i915->gpu_error.wedge_mutex); - return true; + return ret; } static void @@ -5706,6 +5721,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) i915_gem_idle_work_handler); init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); + mutex_init(&dev_priv->gpu_error.wedge_mutex); atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 6d9f45468ac1..604291f7762d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -271,8 +271,8 @@ struct i915_gpu_error { #define I915_RESET_BACKOFF 0 #define I915_RESET_HANDOFF 1 #define I915_RESET_MODESET 2 +#define I915_RESET_ENGINE 3 #define I915_WEDGED (BITS_PER_LONG - 1) -#define I915_RESET_ENGINE 
(I915_WEDGED - I915_NUM_ENGINES) /** Number of times an engine has been reset */ u32 reset_engine_count[I915_NUM_ENGINES]; @@ -283,6 +283,8 @@ struct i915_gpu_error { /** Reason for the current *global* reset */ const char *reason; + struct mutex wedge_mutex; /* serialises wedging/unwedging */ + /** * Waitqueue to signal when a hang is detected. Used to for waiters * to release the struct_mutex for the reset to procede. diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 082809569681..3cda66292e76 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -188,6 +188,7 @@ struct drm_i915_private *mock_gem_device(void) init_waitqueue_head(&i915->gpu_error.wait_queue); init_waitqueue_head(&i915->gpu_error.reset_queue); + mutex_init(&i915->gpu_error.wedge_mutex); i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) -- cgit v1.2.3 From 9f58892ea9962002399132fd3f40c6a273f8d9e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 16 Jan 2019 15:33:04 +0000 Subject: drm/i915: Pull all the reset functionality together into i915_reset.c Currently the code to reset the GPU and our state is spread widely across a few files. Pull the logic together into a common file. Signed-off-by: Chris Wilson Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190116153304.787-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 + drivers/gpu/drm/i915/i915_drv.c | 206 +-- drivers/gpu/drm/i915/i915_drv.h | 33 +- drivers/gpu/drm/i915/i915_gem.c | 446 +------ drivers/gpu/drm/i915/i915_gem_gtt.c | 1 + drivers/gpu/drm/i915/i915_irq.c | 238 ---- drivers/gpu/drm/i915/i915_request.c | 1 + drivers/gpu/drm/i915/i915_reset.c | 1389 ++++++++++++++++++++ drivers/gpu/drm/i915/i915_reset.h | 56 + drivers/gpu/drm/i915/intel_display.c | 15 +- drivers/gpu/drm/i915/intel_engine_cs.c | 1 + drivers/gpu/drm/i915/intel_guc.h | 3 + drivers/gpu/drm/i915/intel_hangcheck.c | 1 + drivers/gpu/drm/i915/intel_uc.c | 1 + drivers/gpu/drm/i915/intel_uncore.c | 556 -------- drivers/gpu/drm/i915/selftests/intel_lrc.c | 2 + drivers/gpu/drm/i915/selftests/intel_workarounds.c | 1 + 18 files changed, 1483 insertions(+), 1472 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_reset.c create mode 100644 drivers/gpu/drm/i915/i915_reset.h (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c34bee16730d..611115ed00db 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -40,9 +40,10 @@ i915-y := i915_drv.o \ i915_mm.o \ i915_params.o \ i915_pci.o \ + i915_reset.o \ i915_suspend.o \ - i915_syncmap.o \ i915_sw_fence.o \ + i915_syncmap.o \ i915_sysfs.o \ intel_csr.o \ intel_device_info.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 24e2d52efa8e..ece72e0e41bc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,8 @@ #include "intel_drv.h" #include "intel_guc_submission.h" +#include "i915_reset.h" + static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { return to_i915(node->minor->dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dafbbfadd1ad..f462a4d28af4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -48,6 +48,7 @@ #include "i915_drv.h" #include 
"i915_trace.h" #include "i915_pmu.h" +#include "i915_reset.h" #include "i915_query.h" #include "i915_vgpu.h" #include "intel_drv.h" @@ -2205,211 +2206,6 @@ static int i915_resume_switcheroo(struct drm_device *dev) return i915_drm_resume(dev); } -/** - * i915_reset - reset chip after a hang - * @i915: #drm_i915_private to reset - * @stalled_mask: mask of the stalled engines with the guilty requests - * @reason: user error message for why we are resetting - * - * Reset the chip. Useful if a hang is detected. Marks the device as wedged - * on failure. - * - * Caller must hold the struct_mutex. - * - * Procedure is fairly simple: - * - reset the chip using the reset reg - * - re-init context state - * - re-init hardware status page - * - re-init ring buffer - * - re-init interrupt state - * - re-init display - */ -void i915_reset(struct drm_i915_private *i915, - unsigned int stalled_mask, - const char *reason) -{ - struct i915_gpu_error *error = &i915->gpu_error; - int ret; - int i; - - GEM_TRACE("flags=%lx\n", error->flags); - - might_sleep(); - lockdep_assert_held(&i915->drm.struct_mutex); - assert_rpm_wakelock_held(i915); - GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - - if (!test_bit(I915_RESET_HANDOFF, &error->flags)) - return; - - /* Clear any previous failed attempts at recovery. Time to try again. */ - if (!i915_gem_unset_wedged(i915)) - goto wakeup; - - if (reason) - dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); - error->reset_count++; - - ret = i915_gem_reset_prepare(i915); - if (ret) { - dev_err(i915->drm.dev, "GPU recovery failed\n"); - goto taint; - } - - if (!intel_has_gpu_reset(i915)) { - if (i915_modparams.reset) - dev_err(i915->drm.dev, "GPU reset not supported\n"); - else - DRM_DEBUG_DRIVER("GPU reset disabled\n"); - goto error; - } - - for (i = 0; i < 3; i++) { - ret = intel_gpu_reset(i915, ALL_ENGINES); - if (ret == 0) - break; - - msleep(100); - } - if (ret) { - dev_err(i915->drm.dev, "Failed to reset chip\n"); - goto taint; - } - - /* Ok, now get things going again... */ - - /* - * Everything depends on having the GTT running, so we need to start - * there. - */ - ret = i915_ggtt_enable_hw(i915); - if (ret) { - DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", - ret); - goto error; - } - - i915_gem_reset(i915, stalled_mask); - intel_overlay_reset(i915); - - /* - * Next we need to restore the context, but we don't use those - * yet either... - * - * Ring buffer needs to be re-initialized in the KMS case, or if X - * was running at the time of the reset (i.e. we weren't VT - * switched away). - */ - ret = i915_gem_init_hw(i915); - if (ret) { - DRM_ERROR("Failed to initialise HW following reset (%d)\n", - ret); - goto error; - } - - i915_queue_hangcheck(i915); - -finish: - i915_gem_reset_finish(i915); -wakeup: - clear_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_bit(&error->flags, I915_RESET_HANDOFF); - return; - -taint: - /* - * History tells us that if we cannot reset the GPU now, we - * never will. This then impacts everything that is run - * subsequently. On failing the reset, we mark the driver - * as wedged, preventing further execution on the GPU. - * We also want to go one step further and add a taint to the - * kernel so that any subsequent faults can be traced back to - * this failure. This is important for CI, where if the - * GPU/driver fails we would like to reboot and restart testing - * rather than continue on into oblivion. For everyone else, - * the system should still plod along, but they have been warned! 
- */ - add_taint(TAINT_WARN, LOCKDEP_STILL_OK); -error: - i915_gem_set_wedged(i915); - i915_retire_requests(i915); - goto finish; -} - -static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - return intel_gpu_reset(dev_priv, intel_engine_flag(engine)); -} - -/** - * i915_reset_engine - reset GPU engine to recover from a hang - * @engine: engine to reset - * @msg: reason for GPU reset; or NULL for no dev_notice() - * - * Reset a specific GPU engine. Useful if a hang is detected. - * Returns zero on successful reset or otherwise an error code. - * - * Procedure is: - * - identifies the request that caused the hang and it is dropped - * - reset engine (which will force the engine to idle) - * - re-init/configure engine - */ -int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) -{ - struct i915_gpu_error *error = &engine->i915->gpu_error; - struct i915_request *active_request; - int ret; - - GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); - GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - - active_request = i915_gem_reset_prepare_engine(engine); - if (IS_ERR_OR_NULL(active_request)) { - /* Either the previous reset failed, or we pardon the reset. */ - ret = PTR_ERR(active_request); - goto out; - } - - if (msg) - dev_notice(engine->i915->drm.dev, - "Resetting %s for %s\n", engine->name, msg); - error->reset_engine_count[engine->id]++; - - if (!engine->i915->guc.execbuf_client) - ret = intel_gt_reset_engine(engine->i915, engine); - else - ret = intel_guc_reset_engine(&engine->i915->guc, engine); - if (ret) { - /* If we fail here, we expect to fallback to a global reset */ - DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - engine->i915->guc.execbuf_client ? "GuC " : "", - engine->name, ret); - goto out; - } - - /* - * The request that caused the hang is stuck on elsp, we know the - * active request and can drop it, adjust head to skip the offending - * request to resume executing remaining requests in the queue. - */ - i915_gem_reset_engine(engine, active_request, true); - - /* - * The engine and its registers (and workarounds in case of render) - * have been reset to their default values. Follow the init_ring - * process to program RING_MODE, HWSP and re-enable submission. 
- */ - ret = engine->init_hw(engine); - if (ret) - goto out; - -out: - intel_engine_cancel_stop_cs(engine); - i915_gem_reset_finish_engine(engine); - return ret; -} - static int i915_pm_prepare(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index da055a86db4d..310d9e1e1620 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2615,19 +2615,7 @@ extern const struct dev_pm_ops i915_pm_ops; extern int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); extern void i915_driver_unload(struct drm_device *dev); -extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); - -extern void i915_reset(struct drm_i915_private *i915, - unsigned int stalled_mask, - const char *reason); -extern int i915_reset_engine(struct intel_engine_cs *engine, - const char *reason); - -extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); -extern int intel_reset_guc(struct drm_i915_private *dev_priv); -extern int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine); + extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2670,20 +2658,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) &dev_priv->gpu_error.hangcheck_work, delay); } -__printf(4, 5) -void i915_handle_error(struct drm_i915_private *dev_priv, - u32 engine_mask, - unsigned long flags, - const char *fmt, ...); -#define I915_ERROR_CAPTURE BIT(0) - extern void intel_irq_init(struct drm_i915_private *dev_priv); extern void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -void i915_clear_error_registers(struct drm_i915_private *dev_priv); - static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; @@ -3048,18 +3027,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, return READ_ONCE(error->reset_engine_count[engine->id]); } -struct i915_request * -i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); -int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); -void i915_gem_reset(struct drm_i915_private *dev_priv, - unsigned int stalled_mask); -void i915_gem_reset_finish_engine(struct intel_engine_cs *engine); -void i915_gem_reset_finish(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); -void i915_gem_reset_engine(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled); void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5c6089777fde..7185a5b4a5ca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -27,15 +27,6 @@ #include #include -#include "i915_drv.h" -#include "i915_gem_clflush.h" -#include "i915_vgpu.h" -#include "i915_trace.h" -#include "intel_drv.h" -#include "intel_frontbuffer.h" -#include "intel_mocs.h" -#include "intel_workarounds.h" -#include "i915_gemfs.h" #include 
#include #include @@ -46,6 +37,18 @@ #include #include +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "i915_gemfs.h" +#include "i915_reset.h" +#include "i915_trace.h" +#include "i915_vgpu.h" + +#include "intel_drv.h" +#include "intel_frontbuffer.h" +#include "intel_mocs.h" +#include "intel_workarounds.h" + static void i915_gem_flush_free_objects(struct drm_i915_private *i915); static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) @@ -2873,61 +2876,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, return 0; } -static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv, - const struct i915_gem_context *ctx) -{ - unsigned int score; - unsigned long prev_hang; - - if (i915_gem_context_is_banned(ctx)) - score = I915_CLIENT_SCORE_CONTEXT_BAN; - else - score = 0; - - prev_hang = xchg(&file_priv->hang_timestamp, jiffies); - if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) - score += I915_CLIENT_SCORE_HANG_FAST; - - if (score) { - atomic_add(score, &file_priv->ban_score); - - DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", - ctx->name, score, - atomic_read(&file_priv->ban_score)); - } -} - -static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) -{ - unsigned int score; - bool banned, bannable; - - atomic_inc(&ctx->guilty_count); - - bannable = i915_gem_context_is_bannable(ctx); - score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); - banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; - - /* Cool contexts don't accumulate client ban score */ - if (!bannable) - return; - - if (banned) { - DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", - ctx->name, atomic_read(&ctx->guilty_count), - score); - i915_gem_context_set_banned(ctx); - } - - if (!IS_ERR_OR_NULL(ctx->file_priv)) - i915_gem_client_mark_guilty(ctx->file_priv, ctx); -} - -static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) -{ - atomic_inc(&ctx->active_count); -} - struct i915_request * i915_gem_find_active_request(struct intel_engine_cs *engine) { @@ -2958,376 +2906,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) return active; } -/* - * Ensure irq handler finishes, and not run again. - * Also return the active request so that we only search for it once. - */ -struct i915_request * -i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) -{ - struct i915_request *request; - - /* - * During the reset sequence, we must prevent the engine from - * entering RC6. As the context state is undefined until we restart - * the engine, if it does enter RC6 during the reset, the state - * written to the powercontext is undefined and so we may lose - * GPU state upon resume, i.e. fail to restart after a reset. - */ - intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); - - request = engine->reset.prepare(engine); - if (request && request->fence.error == -EIO) - request = ERR_PTR(-EIO); /* Previous reset failed! 
*/ - - return request; -} - -int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - struct i915_request *request; - enum intel_engine_id id; - int err = 0; - - for_each_engine(engine, dev_priv, id) { - request = i915_gem_reset_prepare_engine(engine); - if (IS_ERR(request)) { - err = PTR_ERR(request); - continue; - } - - engine->hangcheck.active_request = request; - } - - i915_gem_revoke_fences(dev_priv); - intel_uc_sanitize(dev_priv); - - return err; -} - -static void engine_skip_context(struct i915_request *request) -{ - struct intel_engine_cs *engine = request->engine; - struct i915_gem_context *hung_ctx = request->gem_context; - struct i915_timeline *timeline = request->timeline; - unsigned long flags; - - GEM_BUG_ON(timeline == &engine->timeline); - - spin_lock_irqsave(&engine->timeline.lock, flags); - spin_lock(&timeline->lock); - - list_for_each_entry_continue(request, &engine->timeline.requests, link) - if (request->gem_context == hung_ctx) - i915_request_skip(request, -EIO); - - list_for_each_entry(request, &timeline->requests, link) - i915_request_skip(request, -EIO); - - spin_unlock(&timeline->lock); - spin_unlock_irqrestore(&engine->timeline.lock, flags); -} - -/* Returns the request if it was guilty of the hang */ -static struct i915_request * -i915_gem_reset_request(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled) -{ - /* The guilty request will get skipped on a hung engine. - * - * Users of client default contexts do not rely on logical - * state preserved between batches so it is safe to execute - * queued requests following the hang. Non default contexts - * rely on preserved state, so skipping a batch loses the - * evolution of the state and it needs to be considered corrupted. - * Executing more queued batches on top of corrupted state is - * risky. But we take the risk by trying to advance through - * the queued requests in order to make the client behaviour - * more predictable around resets, by not throwing away random - * amount of batches it has prepared for execution. Sophisticated - * clients can use gem_reset_stats_ioctl and dma fence status - * (exported via sync_file info ioctl on explicit fences) to observe - * when it loses the context state and should rebuild accordingly. - * - * The context ban, and ultimately the client ban, mechanism are safety - * valves if client submission ends up resulting in nothing more than - * subsequent hangs. - */ - - if (i915_request_completed(request)) { - GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n", - engine->name, request->global_seqno, - request->fence.context, request->fence.seqno, - intel_engine_get_seqno(engine)); - stalled = false; - } - - if (stalled) { - i915_gem_context_mark_guilty(request->gem_context); - i915_request_skip(request, -EIO); - - /* If this context is now banned, skip all pending requests. */ - if (i915_gem_context_is_banned(request->gem_context)) - engine_skip_context(request); - } else { - /* - * Since this is not the hung engine, it may have advanced - * since the hang declaration. Double check by refinding - * the active request at the time of the reset. 
- */ - request = i915_gem_find_active_request(engine); - if (request) { - unsigned long flags; - - i915_gem_context_mark_innocent(request->gem_context); - dma_fence_set_error(&request->fence, -EAGAIN); - - /* Rewind the engine to replay the incomplete rq */ - spin_lock_irqsave(&engine->timeline.lock, flags); - request = list_prev_entry(request, link); - if (&request->link == &engine->timeline.requests) - request = NULL; - spin_unlock_irqrestore(&engine->timeline.lock, flags); - } - } - - return request; -} - -void i915_gem_reset_engine(struct intel_engine_cs *engine, - struct i915_request *request, - bool stalled) -{ - if (request) - request = i915_gem_reset_request(engine, request, stalled); - - /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset.reset(engine, request); -} - -void i915_gem_reset(struct drm_i915_private *dev_priv, - unsigned int stalled_mask) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - i915_retire_requests(dev_priv); - - for_each_engine(engine, dev_priv, id) { - struct intel_context *ce; - - i915_gem_reset_engine(engine, - engine->hangcheck.active_request, - stalled_mask & ENGINE_MASK(id)); - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); - - /* - * Ostensibily, we always want a context loaded for powersaving, - * so if the engine is idle after the reset, send a request - * to load our scratch kernel_context. - * - * More mysteriously, if we leave the engine idle after a reset, - * the next userspace batch may hang, with what appears to be - * an incoherent read by the CS (presumably stale TLB). An - * empty request appears sufficient to paper over the glitch. - */ - if (intel_engine_is_idle(engine)) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, - dev_priv->kernel_context); - if (!IS_ERR(rq)) - i915_request_add(rq); - } - } - - i915_gem_restore_fences(dev_priv); -} - -void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) -{ - engine->reset.finish(engine); - - intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); -} - -void i915_gem_reset_finish(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for_each_engine(engine, dev_priv, id) { - engine->hangcheck.active_request = NULL; - i915_gem_reset_finish_engine(engine); - } -} - -static void nop_submit_request(struct i915_request *request) -{ - unsigned long flags; - - GEM_TRACE("%s fence %llx:%d -> -EIO\n", - request->engine->name, - request->fence.context, request->fence.seqno); - dma_fence_set_error(&request->fence, -EIO); - - spin_lock_irqsave(&request->engine->timeline.lock, flags); - __i915_request_submit(request); - intel_engine_write_global_seqno(request->engine, request->global_seqno); - spin_unlock_irqrestore(&request->engine->timeline.lock, flags); -} - -void i915_gem_set_wedged(struct drm_i915_private *i915) -{ - struct i915_gpu_error *error = &i915->gpu_error; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - mutex_lock(&error->wedge_mutex); - if (test_bit(I915_WEDGED, &error->flags)) { - mutex_unlock(&error->wedge_mutex); - return; - } - - if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { - struct drm_printer p = drm_debug_printer(__func__); - - for_each_engine(engine, i915, id) - intel_engine_dump(engine, &p, "%s\n", engine->name); - } - - GEM_TRACE("start\n"); - - /* - * First, stop submission to hw, 
but do not yet complete requests by - * rolling the global seqno forward (since this would complete requests - * for which we haven't set the fence error to EIO yet). - */ - for_each_engine(engine, i915, id) - i915_gem_reset_prepare_engine(engine); - - /* Even if the GPU reset fails, it should still stop the engines */ - if (INTEL_GEN(i915) >= 5) - intel_gpu_reset(i915, ALL_ENGINES); - - for_each_engine(engine, i915, id) { - engine->submit_request = nop_submit_request; - engine->schedule = NULL; - } - i915->caps.scheduler = 0; - - /* - * Make sure no request can slip through without getting completed by - * either this call here to intel_engine_write_global_seqno, or the one - * in nop_submit_request. - */ - synchronize_rcu(); - - /* Mark all executing requests as skipped */ - for_each_engine(engine, i915, id) - engine->cancel_requests(engine); - - for_each_engine(engine, i915, id) { - i915_gem_reset_finish_engine(engine); - intel_engine_wakeup(engine); - } - - smp_mb__before_atomic(); - set_bit(I915_WEDGED, &error->flags); - - GEM_TRACE("end\n"); - mutex_unlock(&error->wedge_mutex); - - wake_up_all(&error->reset_queue); -} - -bool i915_gem_unset_wedged(struct drm_i915_private *i915) -{ - struct i915_gpu_error *error = &i915->gpu_error; - struct i915_timeline *tl; - bool ret = false; - - lockdep_assert_held(&i915->drm.struct_mutex); - - if (!test_bit(I915_WEDGED, &error->flags)) - return true; - - if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ - return false; - - mutex_lock(&error->wedge_mutex); - - GEM_TRACE("start\n"); - - /* - * Before unwedging, make sure that all pending operations - * are flushed and errored out - we may have requests waiting upon - * third party fences. We marked all inflight requests as EIO, and - * every execbuf since returned EIO, for consistency we want all - * the currently pending requests to also be marked as EIO, which - * is done inside our nop_submit_request - and so we must wait. - * - * No more can be submitted until we reset the wedged bit. - */ - list_for_each_entry(tl, &i915->gt.timelines, link) { - struct i915_request *rq; - - rq = i915_gem_active_peek(&tl->last_request, - &i915->drm.struct_mutex); - if (!rq) - continue; - - /* - * We can't use our normal waiter as we want to - * avoid recursively trying to handle the current - * reset. The basic dma_fence_default_wait() installs - * a callback for dma_fence_signal(), which is - * triggered by our nop handler (indirectly, the - * callback enables the signaler thread which is - * woken by the nop_submit_request() advancing the seqno - * and when the seqno passes the fence, the signaler - * then signals the fence waking us up). - */ - if (dma_fence_default_wait(&rq->fence, true, - MAX_SCHEDULE_TIMEOUT) < 0) - goto unlock; - } - i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); - - intel_engines_sanitize(i915, false); - - /* - * Undo nop_submit_request. We prevent all new i915 requests from - * being queued (by disallowing execbuf whilst wedged) so having - * waited for all active requests above, we know the system is idle - * and do not have to worry about a thread being inside - * engine->submit_request() as we swap over. So unlike installing - * the nop_submit_request on reset, we can do this from normal - * context and do not require stop_machine(). 
- */ - intel_engines_reset_default_submission(i915); - i915_gem_contexts_lost(i915); - - GEM_TRACE("end\n"); - - smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ - clear_bit(I915_WEDGED, &i915->gpu_error.flags); - ret = true; -unlock: - mutex_unlock(&i915->gpu_error.wedge_mutex); - - return ret; -} - static void i915_gem_retire_work_handler(struct work_struct *work) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d24628f184e4..9081e3bc5a59 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -37,6 +37,7 @@ #include "i915_drv.h" #include "i915_vgpu.h" +#include "i915_reset.h" #include "i915_trace.h" #include "intel_drv.h" #include "intel_frontbuffer.h" diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 94187e68d39a..1c6cf024a509 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2930,46 +2930,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -struct wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const char *name; -}; - -static void wedge_me(struct work_struct *work) -{ - struct wedge_me *w = container_of(work, typeof(*w), work.work); - - dev_err(w->i915->drm.dev, - "%s timed out, cancelling all in-flight rendering.\n", - w->name); - i915_gem_set_wedged(w->i915); -} - -static void __init_wedge(struct wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) -{ - w->i915 = i915; - w->name = name; - - INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __fini_wedge(struct wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __fini_wedge((W))) - static u32 gen11_gt_engine_identity(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit) @@ -3180,204 +3140,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void i915_reset_device(struct drm_i915_private *dev_priv, - u32 engine_mask, - const char *reason) -{ - struct i915_gpu_error *error = &dev_priv->gpu_error; - struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; - char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; - char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; - char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; - struct wedge_me w; - - kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); - - DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); - - /* Use a watchdog to ensure that our reset completes */ - i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { - intel_prepare_reset(dev_priv); - - error->reason = reason; - error->stalled_mask = engine_mask; - - /* Signal that locked waiters should reset the GPU */ - smp_mb__before_atomic(); - set_bit(I915_RESET_HANDOFF, &error->flags); - wake_up_all(&error->wait_queue); - - /* Wait for anyone holding the lock to wakeup, without - * blocking indefinitely on struct_mutex. 
- */ - do { - if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - i915_reset(dev_priv, engine_mask, reason); - mutex_unlock(&dev_priv->drm.struct_mutex); - } - } while (wait_on_bit_timeout(&error->flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE, - 1)); - - error->stalled_mask = 0; - error->reason = NULL; - - intel_finish_reset(dev_priv); - } - - if (!test_bit(I915_WEDGED, &error->flags)) - kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); -} - -void i915_clear_error_registers(struct drm_i915_private *dev_priv) -{ - u32 eir; - - if (!IS_GEN(dev_priv, 2)) - I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); - - if (INTEL_GEN(dev_priv) < 4) - I915_WRITE(IPEIR, I915_READ(IPEIR)); - else - I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); - - I915_WRITE(EIR, I915_READ(EIR)); - eir = I915_READ(EIR); - if (eir) { - /* - * some errors might have become stuck, - * mask them. - */ - DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); - I915_WRITE(EMR, I915_READ(EMR) | eir); - I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); - } - - if (INTEL_GEN(dev_priv) >= 8) { - I915_WRITE(GEN8_RING_FAULT_REG, - I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); - POSTING_READ(GEN8_RING_FAULT_REG); - } else if (INTEL_GEN(dev_priv) >= 6) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) { - I915_WRITE(RING_FAULT_REG(engine), - I915_READ(RING_FAULT_REG(engine)) & - ~RING_FAULT_VALID); - } - POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); - } -} - -/** - * i915_handle_error - handle a gpu error - * @dev_priv: i915 device private - * @engine_mask: mask representing engines that are hung - * @flags: control flags - * @fmt: Error message format string - * - * Do some basic checking of register state at error time and - * dump it to the syslog. Also call i915_capture_error_state() to make - * sure we get a record and make it available in debugfs. Fire a uevent - * so userspace knows something bad happened (should trigger collection - * of a ring dump etc.). - */ -void i915_handle_error(struct drm_i915_private *dev_priv, - u32 engine_mask, - unsigned long flags, - const char *fmt, ...) -{ - struct intel_engine_cs *engine; - intel_wakeref_t wakeref; - unsigned int tmp; - char error_msg[80]; - char *msg = NULL; - - if (fmt) { - va_list args; - - va_start(args, fmt); - vscnprintf(error_msg, sizeof(error_msg), fmt, args); - va_end(args); - - msg = error_msg; - } - - /* - * In most cases it's guaranteed that we get here with an RPM - * reference held, for example because there is a pending GPU - * request that won't finish until the reset is done. This - * isn't the case at least when we get here by doing a - * simulated reset via debugfs, so get an RPM reference. - */ - wakeref = intel_runtime_pm_get(dev_priv); - - engine_mask &= INTEL_INFO(dev_priv)->ring_mask; - - if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(dev_priv, engine_mask, msg); - i915_clear_error_registers(dev_priv); - } - - /* - * Try engine reset when available. We fall back to full reset if - * single reset fails. 
- */ - if (intel_has_reset_engine(dev_priv) && - !i915_terminally_wedged(&dev_priv->gpu_error)) { - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); - if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags)) - continue; - - if (i915_reset_engine(engine, msg) == 0) - engine_mask &= ~intel_engine_flag(engine); - - clear_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags); - wake_up_bit(&dev_priv->gpu_error.flags, - I915_RESET_ENGINE + engine->id); - } - } - - if (!engine_mask) - goto out; - - /* Full reset needs the mutex, stop any other user trying to do so. */ - if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { - wait_event(dev_priv->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &dev_priv->gpu_error.flags)); - goto out; - } - - /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, dev_priv, tmp) { - while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags)) - wait_on_bit(&dev_priv->gpu_error.flags, - I915_RESET_ENGINE + engine->id, - TASK_UNINTERRUPTIBLE); - } - - i915_reset_device(dev_priv, engine_mask, msg); - - for_each_engine(engine, dev_priv, tmp) { - clear_bit(I915_RESET_ENGINE + engine->id, - &dev_priv->gpu_error.flags); - } - - clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.reset_queue); - -out: - intel_runtime_pm_put(dev_priv, wakeref); -} - /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index f3c3593362ec..33eb9df0dd0e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,7 @@ #include #include "i915_drv.h" +#include "i915_reset.h" static const char *i915_fence_get_driver_name(struct dma_fence *fence) { diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c new file mode 100644 index 000000000000..342d9ee42601 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -0,0 +1,1389 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#include + +#include "i915_drv.h" +#include "i915_gpu_error.h" +#include "i915_reset.h" + +#include "intel_guc.h" + +static void engine_skip_context(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *hung_ctx = rq->gem_context; + struct i915_timeline *timeline = rq->timeline; + unsigned long flags; + + GEM_BUG_ON(timeline == &engine->timeline); + + spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock(&timeline->lock); + + list_for_each_entry_continue(rq, &engine->timeline.requests, link) + if (rq->gem_context == hung_ctx) + i915_request_skip(rq, -EIO); + + list_for_each_entry(rq, &timeline->requests, link) + i915_request_skip(rq, -EIO); + + spin_unlock(&timeline->lock); + spin_unlock_irqrestore(&engine->timeline.lock, flags); +} + +static void client_mark_guilty(struct drm_i915_file_private *file_priv, + const struct i915_gem_context *ctx) +{ + unsigned int score; + unsigned long prev_hang; + + if (i915_gem_context_is_banned(ctx)) + score = I915_CLIENT_SCORE_CONTEXT_BAN; + else + score = 0; + + prev_hang = xchg(&file_priv->hang_timestamp, jiffies); + if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) + score += I915_CLIENT_SCORE_HANG_FAST; + + if (score) { + atomic_add(score, &file_priv->ban_score); + + 
DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", + ctx->name, score, + atomic_read(&file_priv->ban_score)); + } +} + +static void context_mark_guilty(struct i915_gem_context *ctx) +{ + unsigned int score; + bool banned, bannable; + + atomic_inc(&ctx->guilty_count); + + bannable = i915_gem_context_is_bannable(ctx); + score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); + banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; + + /* Cool contexts don't accumulate client ban score */ + if (!bannable) + return; + + if (banned) { + DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", + ctx->name, atomic_read(&ctx->guilty_count), + score); + i915_gem_context_set_banned(ctx); + } + + if (!IS_ERR_OR_NULL(ctx->file_priv)) + client_mark_guilty(ctx->file_priv, ctx); +} + +static void context_mark_innocent(struct i915_gem_context *ctx) +{ + atomic_inc(&ctx->active_count); +} + +static void gen3_stop_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + const u32 base = engine->mmio_base; + + if (intel_engine_stop_cs(engine)) + DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); + + I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); + POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ + + I915_WRITE_FW(RING_HEAD(base), 0); + I915_WRITE_FW(RING_TAIL(base), 0); + POSTING_READ_FW(RING_TAIL(base)); + + /* The ring must be empty before it is disabled */ + I915_WRITE_FW(RING_CTL(base), 0); + + /* Check acts as a post */ + if (I915_READ_FW(RING_HEAD(base)) != 0) + DRM_DEBUG_DRIVER("%s: ring head not parked\n", + engine->name); +} + +static void i915_stop_engines(struct drm_i915_private *i915, + unsigned int engine_mask) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + if (INTEL_GEN(i915) < 3) + return; + + for_each_engine_masked(engine, i915, engine_mask, id) + gen3_stop_engine(engine); +} + +static bool i915_in_reset(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return gdrst & GRDOM_RESET_STATUS; +} + +static int i915_do_reset(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = i915->drm.pdev; + int err; + + /* Assert reset for at least 20 usec, and wait for acknowledgement. */ + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + usleep_range(50, 200); + err = wait_for(i915_in_reset(pdev), 500); + + /* Clear the reset request. 
*/ + pci_write_config_byte(pdev, I915_GDRST, 0); + usleep_range(50, 200); + if (!err) + err = wait_for(!i915_in_reset(pdev), 500); + + return err; +} + +static bool g4x_reset_complete(struct pci_dev *pdev) +{ + u8 gdrst; + + pci_read_config_byte(pdev, I915_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} + +static int g33_do_reset(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = i915->drm.pdev; + + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(pdev), 500); +} + +static int g4x_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + int ret; + + /* WaVcpClkGateDisableForMediaReset:ctg,elk */ + I915_WRITE(VDECCLK_GATE_D, + I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); + POSTING_READ(VDECCLK_GATE_D); + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_MEDIA | GRDOM_RESET_ENABLE); + ret = wait_for(g4x_reset_complete(pdev), 500); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + + pci_write_config_byte(pdev, I915_GDRST, + GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for(g4x_reset_complete(pdev), 500); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + +out: + pci_write_config_byte(pdev, I915_GDRST, 0); + + I915_WRITE(VDECCLK_GATE_D, + I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); + POSTING_READ(VDECCLK_GATE_D); + + return ret; +} + +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) +{ + int ret; + + I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); + ret = intel_wait_for_register(dev_priv, + ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, + 500); + if (ret) { + DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + goto out; + } + + I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); + ret = intel_wait_for_register(dev_priv, + ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, + 500); + if (ret) { + DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + goto out; + } + +out: + I915_WRITE(ILK_GDSR, 0); + POSTING_READ(ILK_GDSR); + return ret; +} + +/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ +static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, + u32 hw_domain_mask) +{ + int err; + + /* + * GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + I915_WRITE_FW(GEN6_GDRST, hw_domain_mask); + + /* Wait for the device to ack the reset requests */ + err = __intel_wait_for_register_fw(dev_priv, + GEN6_GDRST, hw_domain_mask, 0, + 500, 0, + NULL); + if (err) + DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", + hw_domain_mask); + + return err; +} + +static int gen6_reset_engines(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + struct intel_engine_cs *engine; + const u32 hw_engine_mask[I915_NUM_ENGINES] = { + [RCS] = GEN6_GRDOM_RENDER, + [BCS] = GEN6_GRDOM_BLT, + [VCS] = GEN6_GRDOM_MEDIA, + [VCS2] = GEN8_GRDOM_MEDIA2, + [VECS] = GEN6_GRDOM_VECS, + }; + u32 hw_mask; + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN6_GRDOM_FULL; + } else { + unsigned int tmp; + + hw_mask = 0; + for_each_engine_masked(engine, i915, engine_mask, tmp) + hw_mask |= hw_engine_mask[engine->id]; + } + + return gen6_hw_domain_reset(i915, hw_mask); +} + +static u32 gen11_lock_sfc(struct drm_i915_private 
*dev_priv, + struct intel_engine_cs *engine) +{ + u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; + u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; + i915_reg_t sfc_usage; + u32 sfc_usage_bit; + u32 sfc_reset_bit; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return 0; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + + sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); + sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; + + sfc_usage = GEN11_VECS_SFC_USAGE(engine); + sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT; + sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); + break; + + default: + return 0; + } + + /* + * Tell the engine that a software reset is going to happen. The engine + * will then try to force lock the SFC (if currently locked, it will + * remain so until we tell the engine it is safe to unlock; if currently + * unlocked, it will ignore this and all new lock requests). If SFC + * ends up being locked to the engine we want to reset, we have to reset + * it as well (we will unlock it once the reset sequence is completed). + */ + I915_WRITE_FW(sfc_forced_lock, + I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit); + + if (__intel_wait_for_register_fw(dev_priv, + sfc_forced_lock_ack, + sfc_forced_lock_ack_bit, + sfc_forced_lock_ack_bit, + 1000, 0, NULL)) { + DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + return 0; + } + + if (I915_READ_FW(sfc_usage) & sfc_usage_bit) + return sfc_reset_bit; + + return 0; +} + +static void gen11_unlock_sfc(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + i915_reg_t sfc_forced_lock; + u32 sfc_forced_lock_bit; + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + if ((BIT(engine->instance) & vdbox_sfc_access) == 0) + return; + + sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + break; + + case VIDEO_ENHANCEMENT_CLASS: + sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + break; + + default: + return; + } + + I915_WRITE_FW(sfc_forced_lock, + I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit); +} + +static int gen11_reset_engines(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + const u32 hw_engine_mask[I915_NUM_ENGINES] = { + [RCS] = GEN11_GRDOM_RENDER, + [BCS] = GEN11_GRDOM_BLT, + [VCS] = GEN11_GRDOM_MEDIA, + [VCS2] = GEN11_GRDOM_MEDIA2, + [VCS3] = GEN11_GRDOM_MEDIA3, + [VCS4] = GEN11_GRDOM_MEDIA4, + [VECS] = GEN11_GRDOM_VECS, + [VECS2] = GEN11_GRDOM_VECS2, + }; + struct intel_engine_cs *engine; + unsigned int tmp; + u32 hw_mask; + int ret; + + BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN11_GRDOM_FULL; + } else { + hw_mask = 0; + for_each_engine_masked(engine, i915, engine_mask, tmp) { + hw_mask |= 
hw_engine_mask[engine->id]; + hw_mask |= gen11_lock_sfc(i915, engine); + } + } + + ret = gen6_hw_domain_reset(i915, hw_mask); + + if (engine_mask != ALL_ENGINES) + for_each_engine_masked(engine, i915, engine_mask, tmp) + gen11_unlock_sfc(i915, engine); + + return ret; +} + +static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int ret; + + I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); + + ret = __intel_wait_for_register_fw(dev_priv, + RING_RESET_CTL(engine->mmio_base), + RESET_CTL_READY_TO_RESET, + RESET_CTL_READY_TO_RESET, + 700, 0, + NULL); + if (ret) + DRM_ERROR("%s: reset request timeout\n", engine->name); + + return ret; +} + +static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); +} + +static int gen8_reset_engines(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + struct intel_engine_cs *engine; + const bool reset_non_ready = retry >= 1; + unsigned int tmp; + int ret; + + for_each_engine_masked(engine, i915, engine_mask, tmp) { + ret = gen8_engine_reset_prepare(engine); + if (ret && !reset_non_ready) + goto skip_reset; + + /* + * If this is not the first failed attempt to prepare, + * we decide to proceed anyway. + * + * By doing so we risk context corruption and with + * some gens (kbl), possible system hang if reset + * happens during active bb execution. + * + * We rather take context corruption instead of + * failed reset with a wedged driver/gpu. And + * active bb execution case should be covered by + * i915_stop_engines we have before the reset. + */ + } + + if (INTEL_GEN(i915) >= 11) + ret = gen11_reset_engines(i915, engine_mask, retry); + else + ret = gen6_reset_engines(i915, engine_mask, retry); + +skip_reset: + for_each_engine_masked(engine, i915, engine_mask, tmp) + gen8_engine_reset_cancel(engine); + + return ret; +} + +typedef int (*reset_func)(struct drm_i915_private *, + unsigned int engine_mask, + unsigned int retry); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) +{ + if (!i915_modparams.reset) + return NULL; + + if (INTEL_GEN(i915) >= 8) + return gen8_reset_engines; + else if (INTEL_GEN(i915) >= 6) + return gen6_reset_engines; + else if (INTEL_GEN(i915) >= 5) + return ironlake_do_reset; + else if (IS_G4X(i915)) + return g4x_do_reset; + else if (IS_G33(i915) || IS_PINEVIEW(i915)) + return g33_do_reset; + else if (INTEL_GEN(i915) >= 3) + return i915_do_reset; + else + return NULL; +} + +int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask) +{ + reset_func reset = intel_get_gpu_reset(i915); + int retry; + int ret; + + /* + * We want to perform per-engine reset from atomic context (e.g. + * softirq), which imposes the constraint that we cannot sleep. + * However, experience suggests that spending a bit of time waiting + * for a reset helps in various cases, so for a full-device reset + * we apply the opposite rule and wait if we want to. As we should + * always follow up a failed per-engine reset with a full device reset, + * being a little faster, stricter and more error prone for the + * atomic case seems an acceptable compromise. 
+ * + * Unfortunately this leads to a bimodal routine, when the goal was + * to have a single reset function that worked for resetting any + * number of engines simultaneously. + */ + might_sleep_if(engine_mask == ALL_ENGINES); + + /* + * If the power well sleeps during the reset, the reset + * request may be dropped and never completes (causing -EIO). + */ + intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + for (retry = 0; retry < 3; retry++) { + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. + * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + i915_stop_engines(i915, engine_mask); + + ret = -ENODEV; + if (reset) { + GEM_TRACE("engine_mask=%x\n", engine_mask); + ret = reset(i915, engine_mask, retry); + } + if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) + break; + + cond_resched(); + } + intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + + return ret; +} + +bool intel_has_gpu_reset(struct drm_i915_private *i915) +{ + return intel_get_gpu_reset(i915); +} + +bool intel_has_reset_engine(struct drm_i915_private *i915) +{ + return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; +} + +int intel_reset_guc(struct drm_i915_private *i915) +{ + u32 guc_domain = + INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + int ret; + + GEM_BUG_ON(!HAS_GUC(i915)); + + intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + ret = gen6_hw_domain_reset(i915, guc_domain); + intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + + return ret; +} + +/* + * Ensure irq handler finishes, and not run again. + * Also return the active request so that we only search for it once. + */ +static struct i915_request * +reset_prepare_engine(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + /* + * During the reset sequence, we must prevent the engine from + * entering RC6. As the context state is undefined until we restart + * the engine, if it does enter RC6 during the reset, the state + * written to the powercontext is undefined and so we may lose + * GPU state upon resume, i.e. fail to restart after a reset. + */ + intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); + + rq = engine->reset.prepare(engine); + if (rq && rq->fence.error == -EIO) + rq = ERR_PTR(-EIO); /* Previous reset failed! */ + + return rq; +} + +static int reset_prepare(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + struct i915_request *rq; + enum intel_engine_id id; + int err = 0; + + for_each_engine(engine, i915, id) { + rq = reset_prepare_engine(engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + continue; + } + + engine->hangcheck.active_request = rq; + } + + i915_gem_revoke_fences(i915); + intel_uc_sanitize(i915); + + return err; +} + +/* Returns the request if it was guilty of the hang */ +static struct i915_request * +reset_request(struct intel_engine_cs *engine, + struct i915_request *rq, + bool stalled) +{ + /* + * The guilty request will get skipped on a hung engine. + * + * Users of client default contexts do not rely on logical + * state preserved between batches so it is safe to execute + * queued requests following the hang. 
Non default contexts + * rely on preserved state, so skipping a batch loses the + * evolution of the state and it needs to be considered corrupted. + * Executing more queued batches on top of corrupted state is + * risky. But we take the risk by trying to advance through + * the queued requests in order to make the client behaviour + * more predictable around resets, by not throwing away random + * amount of batches it has prepared for execution. Sophisticated + * clients can use gem_reset_stats_ioctl and dma fence status + * (exported via sync_file info ioctl on explicit fences) to observe + * when it loses the context state and should rebuild accordingly. + * + * The context ban, and ultimately the client ban, mechanism are safety + * valves if client submission ends up resulting in nothing more than + * subsequent hangs. + */ + + if (i915_request_completed(rq)) { + GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n", + engine->name, rq->global_seqno, + rq->fence.context, rq->fence.seqno, + intel_engine_get_seqno(engine)); + stalled = false; + } + + if (stalled) { + context_mark_guilty(rq->gem_context); + i915_request_skip(rq, -EIO); + + /* If this context is now banned, skip all pending requests. */ + if (i915_gem_context_is_banned(rq->gem_context)) + engine_skip_context(rq); + } else { + /* + * Since this is not the hung engine, it may have advanced + * since the hang declaration. Double check by refinding + * the active request at the time of the reset. + */ + rq = i915_gem_find_active_request(engine); + if (rq) { + unsigned long flags; + + context_mark_innocent(rq->gem_context); + dma_fence_set_error(&rq->fence, -EAGAIN); + + /* Rewind the engine to replay the incomplete rq */ + spin_lock_irqsave(&engine->timeline.lock, flags); + rq = list_prev_entry(rq, link); + if (&rq->link == &engine->timeline.requests) + rq = NULL; + spin_unlock_irqrestore(&engine->timeline.lock, flags); + } + } + + return rq; +} + +static void reset_engine(struct intel_engine_cs *engine, + struct i915_request *rq, + bool stalled) +{ + if (rq) + rq = reset_request(engine, rq, stalled); + + /* Setup the CS to resume from the breadcrumb of the hung request */ + engine->reset.reset(engine, rq); +} + +static void gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + i915_retire_requests(i915); + + for_each_engine(engine, i915, id) { + struct intel_context *ce; + + reset_engine(engine, + engine->hangcheck.active_request, + stalled_mask & ENGINE_MASK(id)); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); + + /* + * Ostensibily, we always want a context loaded for powersaving, + * so if the engine is idle after the reset, send a request + * to load our scratch kernel_context. + * + * More mysteriously, if we leave the engine idle after a reset, + * the next userspace batch may hang, with what appears to be + * an incoherent read by the CS (presumably stale TLB). An + * empty request appears sufficient to paper over the glitch. 
+ */ + if (intel_engine_is_idle(engine)) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (!IS_ERR(rq)) + i915_request_add(rq); + } + } + + i915_gem_restore_fences(i915); +} + +static void reset_finish_engine(struct intel_engine_cs *engine) +{ + engine->reset.finish(engine); + + intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); +} + +static void reset_finish(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + for_each_engine(engine, i915, id) { + engine->hangcheck.active_request = NULL; + reset_finish_engine(engine); + } +} + +static void nop_submit_request(struct i915_request *request) +{ + unsigned long flags; + + GEM_TRACE("%s fence %llx:%lld -> -EIO\n", + request->engine->name, + request->fence.context, request->fence.seqno); + dma_fence_set_error(&request->fence, -EIO); + + spin_lock_irqsave(&request->engine->timeline.lock, flags); + __i915_request_submit(request); + intel_engine_write_global_seqno(request->engine, request->global_seqno); + spin_unlock_irqrestore(&request->engine->timeline.lock, flags); +} + +void i915_gem_set_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + mutex_lock(&error->wedge_mutex); + if (test_bit(I915_WEDGED, &error->flags)) { + mutex_unlock(&error->wedge_mutex); + return; + } + + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { + struct drm_printer p = drm_debug_printer(__func__); + + for_each_engine(engine, i915, id) + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + + GEM_TRACE("start\n"); + + /* + * First, stop submission to hw, but do not yet complete requests by + * rolling the global seqno forward (since this would complete requests + * for which we haven't set the fence error to EIO yet). + */ + for_each_engine(engine, i915, id) + reset_prepare_engine(engine); + + /* Even if the GPU reset fails, it should still stop the engines */ + if (INTEL_GEN(i915) >= 5) + intel_gpu_reset(i915, ALL_ENGINES); + + for_each_engine(engine, i915, id) { + engine->submit_request = nop_submit_request; + engine->schedule = NULL; + } + i915->caps.scheduler = 0; + + /* + * Make sure no request can slip through without getting completed by + * either this call here to intel_engine_write_global_seqno, or the one + * in nop_submit_request. + */ + synchronize_rcu(); + + /* Mark all executing requests as skipped */ + for_each_engine(engine, i915, id) + engine->cancel_requests(engine); + + for_each_engine(engine, i915, id) { + reset_finish_engine(engine); + intel_engine_wakeup(engine); + } + + smp_mb__before_atomic(); + set_bit(I915_WEDGED, &error->flags); + + GEM_TRACE("end\n"); + mutex_unlock(&error->wedge_mutex); + + wake_up_all(&error->reset_queue); +} + +bool i915_gem_unset_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct i915_timeline *tl; + bool ret = false; + + lockdep_assert_held(&i915->drm.struct_mutex); + + if (!test_bit(I915_WEDGED, &error->flags)) + return true; + + if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ + return false; + + mutex_lock(&error->wedge_mutex); + + GEM_TRACE("start\n"); + + /* + * Before unwedging, make sure that all pending operations + * are flushed and errored out - we may have requests waiting upon + * third party fences. 
We marked all inflight requests as EIO, and + * every execbuf since returned EIO, for consistency we want all + * the currently pending requests to also be marked as EIO, which + * is done inside our nop_submit_request - and so we must wait. + * + * No more can be submitted until we reset the wedged bit. + */ + list_for_each_entry(tl, &i915->gt.timelines, link) { + struct i915_request *rq; + + rq = i915_gem_active_peek(&tl->last_request, + &i915->drm.struct_mutex); + if (!rq) + continue; + + /* + * We can't use our normal waiter as we want to + * avoid recursively trying to handle the current + * reset. The basic dma_fence_default_wait() installs + * a callback for dma_fence_signal(), which is + * triggered by our nop handler (indirectly, the + * callback enables the signaler thread which is + * woken by the nop_submit_request() advancing the seqno + * and when the seqno passes the fence, the signaler + * then signals the fence waking us up). + */ + if (dma_fence_default_wait(&rq->fence, true, + MAX_SCHEDULE_TIMEOUT) < 0) + goto unlock; + } + i915_retire_requests(i915); + GEM_BUG_ON(i915->gt.active_requests); + + intel_engines_sanitize(i915, false); + + /* + * Undo nop_submit_request. We prevent all new i915 requests from + * being queued (by disallowing execbuf whilst wedged) so having + * waited for all active requests above, we know the system is idle + * and do not have to worry about a thread being inside + * engine->submit_request() as we swap over. So unlike installing + * the nop_submit_request on reset, we can do this from normal + * context and do not require stop_machine(). + */ + intel_engines_reset_default_submission(i915); + i915_gem_contexts_lost(i915); + + GEM_TRACE("end\n"); + + smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ + clear_bit(I915_WEDGED, &i915->gpu_error.flags); + ret = true; +unlock: + mutex_unlock(&i915->gpu_error.wedge_mutex); + + return ret; +} + +/** + * i915_reset - reset chip after a hang + * @i915: #drm_i915_private to reset + * @stalled_mask: mask of the stalled engines with the guilty requests + * @reason: user error message for why we are resetting + * + * Reset the chip. Useful if a hang is detected. Marks the device as wedged + * on failure. + * + * Caller must hold the struct_mutex. + * + * Procedure is fairly simple: + * - reset the chip using the reset reg + * - re-init context state + * - re-init hardware status page + * - re-init ring buffer + * - re-init interrupt state + * - re-init display + */ +void i915_reset(struct drm_i915_private *i915, + unsigned int stalled_mask, + const char *reason) +{ + struct i915_gpu_error *error = &i915->gpu_error; + int ret; + int i; + + GEM_TRACE("flags=%lx\n", error->flags); + + might_sleep(); + lockdep_assert_held(&i915->drm.struct_mutex); + assert_rpm_wakelock_held(i915); + GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); + + if (!test_bit(I915_RESET_HANDOFF, &error->flags)) + return; + + /* Clear any previous failed attempts at recovery. Time to try again. 
*/ + if (!i915_gem_unset_wedged(i915)) + goto wakeup; + + if (reason) + dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); + error->reset_count++; + + ret = reset_prepare(i915); + if (ret) { + dev_err(i915->drm.dev, "GPU recovery failed\n"); + goto taint; + } + + if (!intel_has_gpu_reset(i915)) { + if (i915_modparams.reset) + dev_err(i915->drm.dev, "GPU reset not supported\n"); + else + DRM_DEBUG_DRIVER("GPU reset disabled\n"); + goto error; + } + + for (i = 0; i < 3; i++) { + ret = intel_gpu_reset(i915, ALL_ENGINES); + if (ret == 0) + break; + + msleep(100); + } + if (ret) { + dev_err(i915->drm.dev, "Failed to reset chip\n"); + goto taint; + } + + /* Ok, now get things going again... */ + + /* + * Everything depends on having the GTT running, so we need to start + * there. + */ + ret = i915_ggtt_enable_hw(i915); + if (ret) { + DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", + ret); + goto error; + } + + gt_reset(i915, stalled_mask); + intel_overlay_reset(i915); + + /* + * Next we need to restore the context, but we don't use those + * yet either... + * + * Ring buffer needs to be re-initialized in the KMS case, or if X + * was running at the time of the reset (i.e. we weren't VT + * switched away). + */ + ret = i915_gem_init_hw(i915); + if (ret) { + DRM_ERROR("Failed to initialise HW following reset (%d)\n", + ret); + goto error; + } + + i915_queue_hangcheck(i915); + +finish: + reset_finish(i915); +wakeup: + clear_bit(I915_RESET_HANDOFF, &error->flags); + wake_up_bit(&error->flags, I915_RESET_HANDOFF); + return; + +taint: + /* + * History tells us that if we cannot reset the GPU now, we + * never will. This then impacts everything that is run + * subsequently. On failing the reset, we mark the driver + * as wedged, preventing further execution on the GPU. + * We also want to go one step further and add a taint to the + * kernel so that any subsequent faults can be traced back to + * this failure. This is important for CI, where if the + * GPU/driver fails we would like to reboot and restart testing + * rather than continue on into oblivion. For everyone else, + * the system should still plod along, but they have been warned! + */ + add_taint(TAINT_WARN, LOCKDEP_STILL_OK); +error: + i915_gem_set_wedged(i915); + i915_retire_requests(i915); + goto finish; +} + +static inline int intel_gt_reset_engine(struct drm_i915_private *i915, + struct intel_engine_cs *engine) +{ + return intel_gpu_reset(i915, intel_engine_flag(engine)); +} + +/** + * i915_reset_engine - reset GPU engine to recover from a hang + * @engine: engine to reset + * @msg: reason for GPU reset; or NULL for no dev_notice() + * + * Reset a specific GPU engine. Useful if a hang is detected. + * Returns zero on successful reset or otherwise an error code. + * + * Procedure is: + * - identifies the request that caused the hang and it is dropped + * - reset engine (which will force the engine to idle) + * - re-init/configure engine + */ +int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) +{ + struct i915_gpu_error *error = &engine->i915->gpu_error; + struct i915_request *active_request; + int ret; + + GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); + + active_request = reset_prepare_engine(engine); + if (IS_ERR_OR_NULL(active_request)) { + /* Either the previous reset failed, or we pardon the reset. 
*/ + ret = PTR_ERR(active_request); + goto out; + } + + if (msg) + dev_notice(engine->i915->drm.dev, + "Resetting %s for %s\n", engine->name, msg); + error->reset_engine_count[engine->id]++; + + if (!engine->i915->guc.execbuf_client) + ret = intel_gt_reset_engine(engine->i915, engine); + else + ret = intel_guc_reset_engine(&engine->i915->guc, engine); + if (ret) { + /* If we fail here, we expect to fallback to a global reset */ + DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", + engine->i915->guc.execbuf_client ? "GuC " : "", + engine->name, ret); + goto out; + } + + /* + * The request that caused the hang is stuck on elsp, we know the + * active request and can drop it, adjust head to skip the offending + * request to resume executing remaining requests in the queue. + */ + reset_engine(engine, active_request, true); + + /* + * The engine and its registers (and workarounds in case of render) + * have been reset to their default values. Follow the init_ring + * process to program RING_MODE, HWSP and re-enable submission. + */ + ret = engine->init_hw(engine); + if (ret) + goto out; + +out: + intel_engine_cancel_stop_cs(engine); + reset_finish_engine(engine); + return ret; +} + +static void i915_reset_device(struct drm_i915_private *i915, + u32 engine_mask, + const char *reason) +{ + struct i915_gpu_error *error = &i915->gpu_error; + struct kobject *kobj = &i915->drm.primary->kdev->kobj; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; + struct i915_wedge_me w; + + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); + + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); + + /* Use a watchdog to ensure that our reset completes */ + i915_wedge_on_timeout(&w, i915, 5 * HZ) { + intel_prepare_reset(i915); + + error->reason = reason; + error->stalled_mask = engine_mask; + + /* Signal that locked waiters should reset the GPU */ + smp_mb__before_atomic(); + set_bit(I915_RESET_HANDOFF, &error->flags); + wake_up_all(&error->wait_queue); + + /* + * Wait for anyone holding the lock to wakeup, without + * blocking indefinitely on struct_mutex. + */ + do { + if (mutex_trylock(&i915->drm.struct_mutex)) { + i915_reset(i915, engine_mask, reason); + mutex_unlock(&i915->drm.struct_mutex); + } + } while (wait_on_bit_timeout(&error->flags, + I915_RESET_HANDOFF, + TASK_UNINTERRUPTIBLE, + 1)); + + error->stalled_mask = 0; + error->reason = NULL; + + intel_finish_reset(i915); + } + + if (!test_bit(I915_WEDGED, &error->flags)) + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); +} + +void i915_clear_error_registers(struct drm_i915_private *dev_priv) +{ + u32 eir; + + if (!IS_GEN(dev_priv, 2)) + I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); + + if (INTEL_GEN(dev_priv) < 4) + I915_WRITE(IPEIR, I915_READ(IPEIR)); + else + I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); + + I915_WRITE(EIR, I915_READ(EIR)); + eir = I915_READ(EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. 
+ */ + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); + } + + if (INTEL_GEN(dev_priv) >= 8) { + I915_WRITE(GEN8_RING_FAULT_REG, + I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); + POSTING_READ(GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(dev_priv) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, dev_priv, id) { + I915_WRITE(RING_FAULT_REG(engine), + I915_READ(RING_FAULT_REG(engine)) & + ~RING_FAULT_VALID); + } + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); + } +} + +/** + * i915_handle_error - handle a gpu error + * @i915: i915 device private + * @engine_mask: mask representing engines that are hung + * @flags: control flags + * @fmt: Error message format string + * + * Do some basic checking of register state at error time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). + */ +void i915_handle_error(struct drm_i915_private *i915, + u32 engine_mask, + unsigned long flags, + const char *fmt, ...) +{ + struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + unsigned int tmp; + char error_msg[80]; + char *msg = NULL; + + if (fmt) { + va_list args; + + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + msg = error_msg; + } + + /* + * In most cases it's guaranteed that we get here with an RPM + * reference held, for example because there is a pending GPU + * request that won't finish until the reset is done. This + * isn't the case at least when we get here by doing a + * simulated reset via debugfs, so get an RPM reference. + */ + wakeref = intel_runtime_pm_get(i915); + + engine_mask &= INTEL_INFO(i915)->ring_mask; + + if (flags & I915_ERROR_CAPTURE) { + i915_capture_error_state(i915, engine_mask, msg); + i915_clear_error_registers(i915); + } + + /* + * Try engine reset when available. We fall back to full reset if + * single reset fails. + */ + if (intel_has_reset_engine(i915) && + !i915_terminally_wedged(&i915->gpu_error)) { + for_each_engine_masked(engine, i915, engine_mask, tmp) { + BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); + if (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &i915->gpu_error.flags)) + continue; + + if (i915_reset_engine(engine, msg) == 0) + engine_mask &= ~intel_engine_flag(engine); + + clear_bit(I915_RESET_ENGINE + engine->id, + &i915->gpu_error.flags); + wake_up_bit(&i915->gpu_error.flags, + I915_RESET_ENGINE + engine->id); + } + } + + if (!engine_mask) + goto out; + + /* Full reset needs the mutex, stop any other user trying to do so. */ + if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) { + wait_event(i915->gpu_error.reset_queue, + !test_bit(I915_RESET_BACKOFF, + &i915->gpu_error.flags)); + goto out; + } + + /* Prevent any other reset-engine attempt. 
*/ + for_each_engine(engine, i915, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &i915->gpu_error.flags)) + wait_on_bit(&i915->gpu_error.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } + + i915_reset_device(i915, engine_mask, msg); + + for_each_engine(engine, i915, tmp) { + clear_bit(I915_RESET_ENGINE + engine->id, + &i915->gpu_error.flags); + } + + clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); + +out: + intel_runtime_pm_put(i915, wakeref); +} + +static void i915_wedge_me(struct work_struct *work) +{ + struct i915_wedge_me *w = container_of(work, typeof(*w), work.work); + + dev_err(w->i915->drm.dev, + "%s timed out, cancelling all in-flight rendering.\n", + w->name); + i915_gem_set_wedged(w->i915); +} + +void __i915_init_wedge(struct i915_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name) +{ + w->i915 = i915; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +void __i915_fini_wedge(struct i915_wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->i915 = NULL; +} diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h new file mode 100644 index 000000000000..b6a519bde67d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reset.h @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#ifndef I915_RESET_H +#define I915_RESET_H + +#include +#include + +struct drm_i915_private; +struct intel_engine_cs; +struct intel_guc; + +__printf(4, 5) +void i915_handle_error(struct drm_i915_private *i915, + u32 engine_mask, + unsigned long flags, + const char *fmt, ...); +#define I915_ERROR_CAPTURE BIT(0) + +void i915_clear_error_registers(struct drm_i915_private *i915); + +void i915_reset(struct drm_i915_private *i915, + unsigned int stalled_mask, + const char *reason); +int i915_reset_engine(struct intel_engine_cs *engine, + const char *reason); + +bool intel_has_gpu_reset(struct drm_i915_private *i915); +bool intel_has_reset_engine(struct drm_i915_private *i915); + +int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask); + +int intel_reset_guc(struct drm_i915_private *i915); + +struct i915_wedge_me { + struct delayed_work work; + struct drm_i915_private *i915; + const char *name; +}; + +void __i915_init_wedge(struct i915_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name); +void __i915_fini_wedge(struct i915_wedge_me *w); + +#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ + for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \ + (W)->i915; \ + __i915_fini_wedge((W))) + +#endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c6b3b69aaeac..8d6d7ae311f4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -31,13 +31,7 @@ #include #include #include -#include "intel_drv.h" -#include "intel_frontbuffer.h" #include -#include "i915_drv.h" -#include "i915_gem_clflush.h" -#include "intel_dsi.h" -#include "i915_trace.h" #include #include #include @@ -48,6 +42,15 @@ #include #include +#include "intel_drv.h" +#include "intel_dsi.h" +#include "intel_frontbuffer.h" + +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "i915_reset.h" +#include "i915_trace.h" + /* Primary plane formats for gen <= 3 */ static const uint32_t 
i8xx_primary_formats[] = { DRM_FORMAT_C8, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 200218cb157f..eed0da03ff5e 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -25,6 +25,7 @@ #include #include "i915_drv.h" +#include "i915_reset.h" #include "intel_ringbuffer.h" #include "intel_lrc.h" diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 0f1c4f9ebfd8..744220296653 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -192,4 +192,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) spin_unlock_irq(&guc->irq_lock); } +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine); + #endif diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 51e9efec5116..7dc11fcb13de 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -23,6 +23,7 @@ */ #include "i915_drv.h" +#include "i915_reset.h" static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) { diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 731b82afe636..e711eb3268bc 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -26,6 +26,7 @@ #include "intel_guc_submission.h" #include "intel_guc.h" #include "i915_drv.h" +#include "i915_reset.h" static void guc_free_load_err_log(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 681ea532585e..e88f0252d77e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1715,372 +1715,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, return ret; } -static void gen3_stop_engine(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - const u32 base = engine->mmio_base; - - if (intel_engine_stop_cs(engine)) - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); - - I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); - POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ - - I915_WRITE_FW(RING_HEAD(base), 0); - I915_WRITE_FW(RING_TAIL(base), 0); - POSTING_READ_FW(RING_TAIL(base)); - - /* The ring must be empty before it is disabled */ - I915_WRITE_FW(RING_CTL(base), 0); - - /* Check acts as a post */ - if (I915_READ_FW(RING_HEAD(base)) != 0) - DRM_DEBUG_DRIVER("%s: ring head not parked\n", - engine->name); -} - -static void i915_stop_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (INTEL_GEN(dev_priv) < 3) - return; - - for_each_engine_masked(engine, dev_priv, engine_mask, id) - gen3_stop_engine(engine); -} - -static bool i915_in_reset(struct pci_dev *pdev) -{ - u8 gdrst; - - pci_read_config_byte(pdev, I915_GDRST, &gdrst); - return gdrst & GRDOM_RESET_STATUS; -} - -static int i915_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int err; - - /* Assert reset for at least 20 usec, and wait for acknowledgement. */ - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); - usleep_range(50, 200); - err = wait_for(i915_in_reset(pdev), 500); - - /* Clear the reset request. 
*/ - pci_write_config_byte(pdev, I915_GDRST, 0); - usleep_range(50, 200); - if (!err) - err = wait_for(!i915_in_reset(pdev), 500); - - return err; -} - -static bool g4x_reset_complete(struct pci_dev *pdev) -{ - u8 gdrst; - - pci_read_config_byte(pdev, I915_GDRST, &gdrst); - return (gdrst & GRDOM_RESET_ENABLE) == 0; -} - -static int g33_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(pdev), 500); -} - -static int g4x_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int ret; - - /* WaVcpClkGateDisableForMediaReset:ctg,elk */ - I915_WRITE(VDECCLK_GATE_D, - I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); - POSTING_READ(VDECCLK_GATE_D); - - pci_write_config_byte(pdev, I915_GDRST, - GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(pdev), 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); - goto out; - } - - pci_write_config_byte(pdev, I915_GDRST, - GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(pdev), 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); - goto out; - } - -out: - pci_write_config_byte(pdev, I915_GDRST, 0); - - I915_WRITE(VDECCLK_GATE_D, - I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); - POSTING_READ(VDECCLK_GATE_D); - - return ret; -} - -static int ironlake_do_reset(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - int ret; - - I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); - ret = intel_wait_for_register(dev_priv, - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, - 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); - goto out; - } - - I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); - ret = intel_wait_for_register(dev_priv, - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, - 500); - if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); - goto out; - } - -out: - I915_WRITE(ILK_GDSR, 0); - POSTING_READ(ILK_GDSR); - return ret; -} - -/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ -static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, - u32 hw_domain_mask) -{ - int err; - - /* GEN6_GDRST is not in the gt power well, no need to check - * for fifo space for the write or forcewake the chip for - * the read - */ - __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); - - /* Wait for the device to ack the reset requests */ - err = __intel_wait_for_register_fw(dev_priv, - GEN6_GDRST, hw_domain_mask, 0, - 500, 0, - NULL); - if (err) - DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", - hw_domain_mask); - - return err; -} - -/** - * gen6_reset_engines - reset individual engines - * @dev_priv: i915 device - * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset - * @retry: the count of of previous attempts to reset. - * - * This function will reset the individual engines that are set in engine_mask. - * If you provide ALL_ENGINES as mask, full global domain reset will be issued. - * - * Note: It is responsibility of the caller to handle the difference between - * asking full domain reset versus reset for all available individual engines. - * - * Returns 0 on success, nonzero on error. 
- */ -static int gen6_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct intel_engine_cs *engine; - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN6_GRDOM_RENDER, - [BCS] = GEN6_GRDOM_BLT, - [VCS] = GEN6_GRDOM_MEDIA, - [VCS2] = GEN8_GRDOM_MEDIA2, - [VECS] = GEN6_GRDOM_VECS, - }; - u32 hw_mask; - - if (engine_mask == ALL_ENGINES) { - hw_mask = GEN6_GRDOM_FULL; - } else { - unsigned int tmp; - - hw_mask = 0; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - hw_mask |= hw_engine_mask[engine->id]; - } - - return gen6_hw_domain_reset(dev_priv, hw_mask); -} - -static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; - i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; - u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; - i915_reg_t sfc_usage; - u32 sfc_usage_bit; - u32 sfc_reset_bit; - - switch (engine->class) { - case VIDEO_DECODE_CLASS: - if ((BIT(engine->instance) & vdbox_sfc_access) == 0) - return 0; - - sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; - - sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); - sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; - - sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); - sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT; - sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); - break; - - case VIDEO_ENHANCEMENT_CLASS: - sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; - - sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); - sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; - - sfc_usage = GEN11_VECS_SFC_USAGE(engine); - sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT; - sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); - break; - - default: - return 0; - } - - /* - * Tell the engine that a software reset is going to happen. The engine - * will then try to force lock the SFC (if currently locked, it will - * remain so until we tell the engine it is safe to unlock; if currently - * unlocked, it will ignore this and all new lock requests). If SFC - * ends up being locked to the engine we want to reset, we have to reset - * it as well (we will unlock it once the reset sequence is completed). 
- */ - I915_WRITE_FW(sfc_forced_lock, - I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit); - - if (__intel_wait_for_register_fw(dev_priv, - sfc_forced_lock_ack, - sfc_forced_lock_ack_bit, - sfc_forced_lock_ack_bit, - 1000, 0, NULL)) { - DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); - return 0; - } - - if (I915_READ_FW(sfc_usage) & sfc_usage_bit) - return sfc_reset_bit; - - return 0; -} - -static void gen11_unlock_sfc(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine) -{ - u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; - i915_reg_t sfc_forced_lock; - u32 sfc_forced_lock_bit; - - switch (engine->class) { - case VIDEO_DECODE_CLASS: - if ((BIT(engine->instance) & vdbox_sfc_access) == 0) - return; - - sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; - break; - - case VIDEO_ENHANCEMENT_CLASS: - sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; - break; - - default: - return; - } - - I915_WRITE_FW(sfc_forced_lock, - I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit); -} - -/** - * gen11_reset_engines - reset individual engines - * @dev_priv: i915 device - * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset - * - * This function will reset the individual engines that are set in engine_mask. - * If you provide ALL_ENGINES as mask, full global domain reset will be issued. - * - * Note: It is responsibility of the caller to handle the difference between - * asking full domain reset versus reset for all available individual engines. - * - * Returns 0 on success, nonzero on error. - */ -static int gen11_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask) -{ - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN11_GRDOM_RENDER, - [BCS] = GEN11_GRDOM_BLT, - [VCS] = GEN11_GRDOM_MEDIA, - [VCS2] = GEN11_GRDOM_MEDIA2, - [VCS3] = GEN11_GRDOM_MEDIA3, - [VCS4] = GEN11_GRDOM_MEDIA4, - [VECS] = GEN11_GRDOM_VECS, - [VECS2] = GEN11_GRDOM_VECS2, - }; - struct intel_engine_cs *engine; - unsigned int tmp; - u32 hw_mask; - int ret; - - BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); - - if (engine_mask == ALL_ENGINES) { - hw_mask = GEN11_GRDOM_FULL; - } else { - hw_mask = 0; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - hw_mask |= hw_engine_mask[engine->id]; - hw_mask |= gen11_lock_sfc(dev_priv, engine); - } - } - - ret = gen6_hw_domain_reset(dev_priv, hw_mask); - - if (engine_mask != ALL_ENGINES) - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - gen11_unlock_sfc(dev_priv, engine); - - return ret; -} - /** * __intel_wait_for_register_fw - wait until register matches expected state * @dev_priv: the i915 device @@ -2191,196 +1825,6 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, return ret; } -static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - int ret; - - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - - ret = __intel_wait_for_register_fw(dev_priv, - RING_RESET_CTL(engine->mmio_base), - RESET_CTL_READY_TO_RESET, - RESET_CTL_READY_TO_RESET, - 700, 0, - NULL); - if (ret) - DRM_ERROR("%s: reset request timeout\n", engine->name); - - return ret; -} - -static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - 
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); -} - -static int reset_engines(struct drm_i915_private *i915, - unsigned int engine_mask, - unsigned int retry) -{ - if (INTEL_GEN(i915) >= 11) - return gen11_reset_engines(i915, engine_mask); - else - return gen6_reset_engines(i915, engine_mask, retry); -} - -static int gen8_reset_engines(struct drm_i915_private *dev_priv, - unsigned int engine_mask, - unsigned int retry) -{ - struct intel_engine_cs *engine; - const bool reset_non_ready = retry >= 1; - unsigned int tmp; - int ret; - - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - ret = gen8_engine_reset_prepare(engine); - if (ret && !reset_non_ready) - goto skip_reset; - - /* - * If this is not the first failed attempt to prepare, - * we decide to proceed anyway. - * - * By doing so we risk context corruption and with - * some gens (kbl), possible system hang if reset - * happens during active bb execution. - * - * We rather take context corruption instead of - * failed reset with a wedged driver/gpu. And - * active bb execution case should be covered by - * i915_stop_engines we have before the reset. - */ - } - - ret = reset_engines(dev_priv, engine_mask, retry); - -skip_reset: - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - gen8_engine_reset_cancel(engine); - - return ret; -} - -typedef int (*reset_func)(struct drm_i915_private *, - unsigned int engine_mask, unsigned int retry); - -static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) -{ - if (!i915_modparams.reset) - return NULL; - - if (INTEL_GEN(dev_priv) >= 8) - return gen8_reset_engines; - else if (INTEL_GEN(dev_priv) >= 6) - return gen6_reset_engines; - else if (IS_GEN(dev_priv, 5)) - return ironlake_do_reset; - else if (IS_G4X(dev_priv)) - return g4x_do_reset; - else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) - return g33_do_reset; - else if (INTEL_GEN(dev_priv) >= 3) - return i915_do_reset; - else - return NULL; -} - -int intel_gpu_reset(struct drm_i915_private *dev_priv, - const unsigned int engine_mask) -{ - reset_func reset = intel_get_gpu_reset(dev_priv); - unsigned int retry; - int ret; - - GEM_BUG_ON(!engine_mask); - - /* - * We want to perform per-engine reset from atomic context (e.g. - * softirq), which imposes the constraint that we cannot sleep. - * However, experience suggests that spending a bit of time waiting - * for a reset helps in various cases, so for a full-device reset - * we apply the opposite rule and wait if we want to. As we should - * always follow up a failed per-engine reset with a full device reset, - * being a little faster, stricter and more error prone for the - * atomic case seems an acceptable compromise. - * - * Unfortunately this leads to a bimodal routine, when the goal was - * to have a single reset function that worked for resetting any - * number of engines simultaneously. - */ - might_sleep_if(engine_mask == ALL_ENGINES); - - /* - * If the power well sleeps during the reset, the reset - * request may be dropped and never completes (causing -EIO). - */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - for (retry = 0; retry < 3; retry++) { - - /* - * We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). Also as modern gpu as kbl can suffer - * from system hang if batchbuffer is progressing when - * the reset is issued, regardless of READY_TO_RESET ack. - * Thus assume it is best to stop engines on all gens - * where we have a gpu reset. 
- * - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) - * - * WaMediaResetMainRingCleanup:ctg,elk (presumably) - * - * FIXME: Wa for more modern gens needs to be validated - */ - i915_stop_engines(dev_priv, engine_mask); - - ret = -ENODEV; - if (reset) { - ret = reset(dev_priv, engine_mask, retry); - GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n", - engine_mask, ret, retry); - } - if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) - break; - - cond_resched(); - } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - - return ret; -} - -bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) -{ - return intel_get_gpu_reset(dev_priv) != NULL; -} - -bool intel_has_reset_engine(struct drm_i915_private *dev_priv) -{ - return (INTEL_INFO(dev_priv)->has_reset_engine && - i915_modparams.reset >= 2); -} - -int intel_reset_guc(struct drm_i915_private *dev_priv) -{ - u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC : - GEN9_GRDOM_GUC; - int ret; - - GEM_BUG_ON(!HAS_GUC(dev_priv)); - - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = gen6_hw_domain_reset(dev_priv, guc_domain); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - - return ret; -} - bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) { return check_for_unclaimed_mmio(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index e6073cd4719c..2b2ecd76c2ac 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -4,6 +4,8 @@ * Copyright © 2018 Intel Corporation */ +#include "../i915_reset.h" + #include "../i915_selftest.h" #include "igt_flush_test.h" #include "igt_spinner.h" diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 9009d7b8b136..a8cac56be835 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -5,6 +5,7 @@ */ #include "../i915_selftest.h" +#include "../i915_reset.h" #include "igt_flush_test.h" #include "igt_reset.h" -- cgit v1.2.3 From 739f3abdbfcf8c950bb35eb08530cee489e1a967 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:19 +0200 Subject: drm/i915: small isolated c99 types to kernel types switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch fixes sprinkled on top of the changed lines. 
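For example, in the i915_gem_fence_reg.c hunk below the substitution turns

	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;

into

	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;

with no change in width or semantics; uint8_t, uint16_t and uint64_t map to u8, u16 and u64 in the same way.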
Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/14ed72e7f04c9340a057855c5950b54811f8a477.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_gem.c | 14 +++++++------- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 8 ++++---- drivers/gpu/drm/i915/i915_gpu_error.c | 10 +++++----- drivers/gpu/drm/i915/i915_perf.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- drivers/gpu/drm/i915/intel_atomic.c | 4 ++-- drivers/gpu/drm/i915/intel_atomic_plane.c | 4 ++-- drivers/gpu/drm/i915/intel_dp_mst.c | 2 +- drivers/gpu/drm/i915/intel_dpio_phy.c | 18 +++++++++--------- drivers/gpu/drm/i915/intel_engine_cs.c | 12 ++++++------ drivers/gpu/drm/i915/intel_fbc.c | 2 +- drivers/gpu/drm/i915/intel_fifo_underrun.c | 12 ++++++------ drivers/gpu/drm/i915/intel_hdcp.c | 4 ++-- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_pipe_crc.c | 18 +++++++++--------- drivers/gpu/drm/i915/intel_psr.c | 6 +++--- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 20 ++++++++++---------- 18 files changed, 72 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7185a5b4a5ca..b359390ba22c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -713,8 +713,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) static int i915_gem_create(struct drm_file *file, struct drm_i915_private *dev_priv, - uint64_t size, - uint32_t *handle_p) + u64 size, + u32 *handle_p) { struct drm_i915_gem_object *obj; int ret; @@ -1573,8 +1573,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_set_domain *args = data; struct drm_i915_gem_object *obj; - uint32_t read_domains = args->read_domains; - uint32_t write_domain = args->write_domain; + u32 read_domains = args->read_domains; + u32 write_domain = args->write_domain; int err; /* Only handle setting domains to types used by the CPU. 
*/ @@ -1756,7 +1756,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (IS_ERR((void *)addr)) return addr; - args->addr_ptr = (uint64_t) addr; + args->addr_ptr = (u64)addr; return 0; } @@ -2158,8 +2158,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, - uint32_t handle, - uint64_t *offset) + u32 handle, + u64 *offset) { struct drm_i915_gem_object *obj; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index f7947d89cf45..46e259661294 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -555,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) { - uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) { /* @@ -579,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } } else { - uint32_t dimm_c0, dimm_c1; + u32 dimm_c0, dimm_c1; dimm_c0 = I915_READ(MAD_DIMM_C0); dimm_c1 = I915_READ(MAD_DIMM_C1); dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; @@ -611,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } else if (IS_MOBILE(dev_priv) || IS_I915G(dev_priv) || IS_I945G(dev_priv)) { - uint32_t dcc; + u32 dcc; /* On 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5eaf586c4d48..1f8e80e31b49 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1082,7 +1082,7 @@ i915_error_object_create(struct drm_i915_private *i915, /* The error capture is special as tries to run underneath the normal * locking rules - so we use the raw version of the i915_gem_active lookup. */ -static inline uint32_t +static inline u32 __active_get_seqno(struct i915_gem_active *active) { struct i915_request *request; @@ -1153,11 +1153,11 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, * * It's only a small step better than a random number in its current form. */ -static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, - struct i915_gpu_state *error, - int *engine_id) +static u32 i915_error_generate_code(struct drm_i915_private *dev_priv, + struct i915_gpu_state *error, + int *engine_id) { - uint32_t error_code = 0; + u32 error_code = 0; int i; /* IPEHR would be an ideal way to detect errors, as it's the gross diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index faff6cf1aaa1..727118301f91 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3021,7 +3021,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) (addr >= 0x182300 && addr <= 0x1823A4); } -static uint32_t mask_reg_value(u32 reg, u32 val) +static u32 mask_reg_value(u32 reg, u32 val) { /* HALF_SLICE_CHICKEN2 is programmed with a the * WaDisableSTUnitPowerOptimization workaround. 
Make sure the value diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fad5a9e8b44d..9a1340cfda6c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -117,14 +117,14 @@ */ typedef struct { - uint32_t reg; + u32 reg; } i915_reg_t; #define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) #define INVALID_MMIO_REG _MMIO(0) -static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg) +static inline u32 i915_mmio_reg_offset(i915_reg_t reg) { return reg.reg; } diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index d8dbc9980281..16263add3cdd 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -46,7 +46,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -78,7 +78,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val) + u64 val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 683a75dad4fb..9a2fdc77ebcb 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -311,7 +311,7 @@ int intel_plane_atomic_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", property->base.id, property->name); @@ -334,7 +334,7 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, - uint64_t val) + u64 val) { DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", property->base.id, property->name); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 778c887108b7..909b9f555458 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -239,7 +239,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; - uint32_t temp; + u32 temp; /* MST encoders are bound to a crtc, not to a connector, * force the mapping here for get_hw_state. 
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 3c7f10d17658..95cb8b154f87 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -413,7 +413,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, } if (phy_info->rcomp_phy != -1) { - uint32_t grc_code; + u32 grc_code; bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); @@ -445,7 +445,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - uint32_t val; + u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -515,7 +515,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - uint32_t mask; + u32 mask; bool ok; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -567,8 +567,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, #undef _CHK } -uint8_t -bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count) +u8 +bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) { switch (lane_count) { case 1: @@ -585,7 +585,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count) } void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - uint8_t lane_lat_optim_mask) + u8 lane_lat_optim_mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -610,7 +610,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, } } -uint8_t +u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -618,7 +618,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) enum dpio_phy phy; enum dpio_channel ch; int lane; - uint8_t mask; + u8 mask; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); @@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); if (reset) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index eed0da03ff5e..cc6379422bc0 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -800,15 +800,15 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) return mcr_s_ss_select; } -static inline uint32_t +static inline u32 read_subslice_reg(struct drm_i915_private *dev_priv, int slice, int subslice, i915_reg_t reg) { - uint32_t mcr_slice_subslice_mask; - uint32_t mcr_slice_subslice_select; - uint32_t default_mcr_s_ss_select; - uint32_t mcr; - uint32_t ret; + u32 mcr_slice_subslice_mask; + u32 mcr_slice_subslice_select; + u32 default_mcr_s_ss_select; + u32 mcr; + u32 ret; enum forcewake_domains fw_domains; if (INTEL_GEN(dev_priv) >= 11) { diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index ccd5e110a19c..ec72be4b7a7b 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -594,7 +594,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, } static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, - uint32_t pixel_format) + u32 pixel_format) { switch (pixel_format) { case DRM_FORMAT_XRGB8888: 
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 9b39975c8389..3b9285130ef5 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : - DE_PIPEB_FIFO_UNDERRUN; + u32 bit = (pipe == PIPE_A) ? + DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; if (enable) ilk_enable_display_irq(dev_priv, bit); @@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t err_int = I915_READ(GEN7_ERR_INT); + u32 err_int = I915_READ(GEN7_ERR_INT); lockdep_assert_held(&dev_priv->irq_lock); @@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t bit = (pch_transcoder == PIPE_A) ? - SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; + u32 bit = (pch_transcoder == PIPE_A) ? + SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) ibx_enable_display_interrupt(dev_priv, bit); @@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; - uint32_t serr_int = I915_READ(SERR_INT); + u32 serr_int = I915_READ(SERR_INT); lockdep_assert_held(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 3fcb3b775948..ce7ba3a9c000 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -838,8 +838,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state) { - uint64_t old_cp = old_state->content_protection; - uint64_t new_cp = new_state->content_protection; + u64 old_cp = old_state->content_protection; + u64 new_cp = new_state->content_protection; struct drm_crtc_state *crtc_state; if (!new_state->crtc) { diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index a62ad80fdf97..f0fa0f767eb6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2608,7 +2608,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, { struct drm_i915_gem_object *ctx_obj; struct i915_vma *vma; - uint32_t context_size; + u32 context_size; struct intel_ring *ring; struct i915_timeline *timeline; int ret; diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 56d614b02302..a8554dc4f196 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -44,7 +44,7 @@ static const char * const pipe_crc_sources[] = { }; static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; @@ -120,7 +120,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { bool need_stable_symbols = false; @@ -165,7 +165,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private 
*dev_priv, * - DisplayPort scrambling: used for EMI reduction */ if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); tmp |= DC_BALANCE_RESET_VLV; switch (pipe) { @@ -190,7 +190,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { bool need_stable_symbols = false; @@ -244,7 +244,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, * - DisplayPort scrambling: used for EMI reduction */ if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); WARN_ON(!IS_G4X(dev_priv)); @@ -265,7 +265,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, enum pipe pipe) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); switch (pipe) { case PIPE_A: @@ -289,7 +289,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, enum pipe pipe) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = I915_READ(PORT_DFT2_G4X); if (pipe == PIPE_A) tmp &= ~PIPE_A_SCRAMBLE_RESET; @@ -304,7 +304,7 @@ static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; @@ -392,7 +392,7 @@ unlock: static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val, + u32 *val, bool set_wa) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 0f6b2b4702e3..8dbf26c212cc 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -230,7 +230,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { - uint8_t dprx = 0; + u8 dprx = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &dprx) != 1) @@ -240,7 +240,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) { - uint8_t alpm_caps = 0; + u8 alpm_caps = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps) != 1) @@ -384,7 +384,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 aux_clock_divider, aux_ctl; int i; - static const uint8_t aux_msg[] = { + static const u8 aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, [2] = DP_SET_POWER & 0xff, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3c1366c58cf3..616f6bbb18ad 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -28,7 +28,7 @@ struct i915_sched_attr; * workarounds! 
*/ #define CACHELINE_BYTES 64 -#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t)) +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) struct intel_hw_status_page { struct i915_vma *vma; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 79f00610860b..a017a4232c0f 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -903,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored. */ -static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) +static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) { - uint32_t val; - uint32_t mask; + u32 val; + u32 mask; if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) state &= dev_priv->csr.allowed_dc_mask; @@ -1538,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; enum pipe pipe; - uint32_t tmp; + u32 tmp; WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); @@ -3328,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, return 1; } -static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, - int enable_dc) +static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, + int enable_dc) { - uint32_t mask; + u32 mask; int requested_dc; int max_dc; @@ -3596,7 +3596,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; val = MBUS_ABOX_BT_CREDIT_POOL1(16) | MBUS_ABOX_BT_CREDIT_POOL2(16) | @@ -3907,7 +3907,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * current lane status. */ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { - uint32_t status = I915_READ(DPLL(PIPE_A)); + u32 status = I915_READ(DPLL(PIPE_A)); unsigned int mask; mask = status & DPLL_PORTB_READY_MASK; @@ -3938,7 +3938,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) } if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { - uint32_t status = I915_READ(DPIO_PHY_STATUS); + u32 status = I915_READ(DPIO_PHY_STATUS); unsigned int mask; mask = status & DPLL_PORTD_READY_MASK; -- cgit v1.2.3 From c7cc52167541f4c8f97aad58d86d93886ba057ce Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:20 +0200 Subject: drm/i915/crt: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/4deb1838b288e027b6483e7ebd6b7b365d0ef979.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_crt.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 081c333f30d2..c2e799a5e63e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -631,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) } static enum drm_connector_status -intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) +intel_crt_load_detect(struct intel_crt *crt, u32 pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t save_bclrpat; - uint32_t save_vtotal; - uint32_t vtotal, vactive; - uint32_t vsample; - uint32_t vblank, vblank_start, vblank_end; - uint32_t dsl; + u32 save_bclrpat; + u32 save_vtotal; + u32 vtotal, vactive; + u32 vsample; + u32 vblank, vblank_start, vblank_end; + u32 dsl; i915_reg_t bclrpat_reg, vtotal_reg, vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg; - uint8_t st00; + u8 st00; enum drm_connector_status status; DRM_DEBUG_KMS("starting load-detect on CRT\n"); @@ -669,7 +669,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) I915_WRITE(bclrpat_reg, 0x500050); if (!IS_GEN(dev_priv, 2)) { - uint32_t pipeconf = I915_READ(pipeconf_reg); + u32 pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); POSTING_READ(pipeconf_reg); /* Wait for next Vblank to substitue @@ -690,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - uint32_t vsync = I915_READ(vsync_reg); - uint32_t vsync_start = (vsync & 0xffff) + 1; + u32 vsync = I915_READ(vsync_reg); + u32 vsync_start = (vsync & 0xffff) + 1; vblank_start = vsync_start; I915_WRITE(vblank_reg, -- cgit v1.2.3 From 977dcc06c3e9d42318afab68a39dfaf0895cea31 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:22 +0200 Subject: drm/i915/lspcon: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/0c2d399bfb8fd9f90c7899eaaa0a9cab82f0d68d.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_lspcon.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 96a8d9524b0c..f08a9b4058f0 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) } static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, - uint8_t *avi_buf) + u8 *avi_buf) { u8 avi_if_ctrl; u8 block_count = 0; u8 *data; - uint16_t reg; + u16 reg; ssize_t ret; while (block_count < 4) { @@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, } static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, - const uint8_t *frame, + const u8 *frame, ssize_t len) { - uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; + u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; /* * Parade's frames contains 32 bytes of data, divided @@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, } static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, - const uint8_t *buffer, ssize_t len) + const u8 *buffer, ssize_t len) { int ret; - uint32_t val = 0; - uint32_t retry; - uint16_t reg; - const uint8_t *data = buffer; + u32 val = 0; + u32 retry; + u16 reg; + const u8 *data = buffer; reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET; while (val < len) { @@ -459,7 +459,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, { ssize_t ret; union hdmi_infoframe frame; - uint8_t buf[VIDEO_DIP_DATA_SIZE]; + u8 buf[VIDEO_DIP_DATA_SIZE]; struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct intel_lspcon *lspcon = &dig_port->lspcon; struct intel_dp *intel_dp = &dig_port->dp; -- cgit v1.2.3 From e5315213ecd26dbb24fb1ffa2200b3c94c7d32cb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:23 +0200 Subject: drm/i915/debugfs: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/4d71ed8a432b4121516049334512d35623c8acaa.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ece72e0e41bc..cdf7730a79df 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2251,7 +2251,7 @@ static void i915_guc_client_info(struct seq_file *m, { struct intel_engine_cs *engine; enum intel_engine_id id; - uint64_t tot = 0; + u64 tot = 0; seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", client->priority, client->stage_id, client->proc_desc_offset); @@ -3646,7 +3646,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); -static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) +static void wm_latency_show(struct seq_file *m, const u16 wm[8]) { struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; @@ -3689,7 +3689,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) static int pri_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3704,7 +3704,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) static int spr_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3719,7 +3719,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) static int cur_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - const uint16_t *latencies; + const u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3762,12 +3762,12 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file) } static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp, uint16_t wm[8]) + size_t len, loff_t *offp, u16 wm[8]) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; struct drm_device *dev = &dev_priv->drm; - uint16_t new[8] = { 0 }; + u16 new[8] = { 0 }; int num_levels; int level; int ret; @@ -3812,7 +3812,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3827,7 +3827,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -3842,7 +3842,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - uint16_t *latencies; + 
u16 *latencies; if (INTEL_GEN(dev_priv) >= 9) latencies = dev_priv->wm.skl_latency; @@ -4860,7 +4860,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data) struct drm_connector *connector = m->private; struct intel_dp *intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); - uint8_t buf[16]; + u8 buf[16]; ssize_t err; int i; -- cgit v1.2.3 From a9c287c94e7980762be9b0d13fde5eff5a3afda2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:24 +0200 Subject: drm/i915/irq: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/841f4eac1c52f4ed3efe2ac9e343d6640c03b774.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 82 ++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1c6cf024a509..1abfc3fa76ad 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -223,10 +223,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); /* For display hotplug interrupt */ static inline void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits) + u32 mask, + u32 bits) { - uint32_t val; + u32 val; lockdep_assert_held(&dev_priv->irq_lock); WARN_ON(bits & ~mask); @@ -250,8 +250,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, * version is also available. 
*/ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits) + u32 mask, + u32 bits) { spin_lock_irq(&dev_priv->irq_lock); i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); @@ -300,10 +300,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, * @enabled_irq_mask: mask of interrupt bits to enable */ void ilk_update_display_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -330,8 +330,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { lockdep_assert_held(&dev_priv->irq_lock); @@ -345,13 +345,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, I915_WRITE(GTIMR, dev_priv->gt_irq_mask); } -void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) { ilk_update_gt_irq(dev_priv, mask, mask); POSTING_READ_FW(GTIMR); } -void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) { ilk_update_gt_irq(dev_priv, mask, 0); } @@ -390,10 +390,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) * @enabled_irq_mask: mask of interrupt bits to enable */ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; WARN_ON(enabled_irq_mask & ~interrupt_mask); @@ -577,11 +577,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) * @enabled_irq_mask: mask of interrupt bits to enable */ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; - uint32_t old_val; + u32 new_val; + u32 old_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -611,10 +611,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, */ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t new_val; + u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -641,10 +641,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask) + u32 interrupt_mask, + u32 enabled_irq_mask) { - uint32_t sdeimr = I915_READ(SDEIMR); + u32 sdeimr = I915_READ(SDEIMR); sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); @@ -1368,8 +1368,8 @@ static void ivybridge_parity_work(struct work_struct *work) container_of(work, typeof(*dev_priv), l3_parity.error_work); u32 error_status, row, bank, subbank; char *parity_event[6]; - uint32_t misccpctl; - uint8_t slice = 0; + u32 misccpctl; + u8 slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. 
* In order to prevent a get/put style interface, acquire struct mutex @@ -1730,13 +1730,13 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) #if defined(CONFIG_DEBUG_FS) static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t crc0, uint32_t crc1, - uint32_t crc2, uint32_t crc3, - uint32_t crc4) + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - uint32_t crcs[5]; + u32 crcs[5]; spin_lock(&pipe_crc->lock); /* @@ -1768,9 +1768,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static inline void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t crc0, uint32_t crc1, - uint32_t crc2, uint32_t crc3, - uint32_t crc4) {} + u32 crc0, u32 crc1, + u32 crc2, u32 crc3, + u32 crc4) {} #endif @@ -1796,7 +1796,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - uint32_t res1, res2; + u32 res1, res2; if (INTEL_GEN(dev_priv) >= 3) res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); @@ -3172,7 +3172,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; - uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? + u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3234,7 +3234,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); unsigned long irqflags; - uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? + u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3452,7 +3452,7 @@ static void gen11_irq_reset(struct drm_device *dev) void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { - uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; + u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -3921,7 +3921,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) { /* These are interrupts we'll toggle with the ring mask register */ - uint32_t gt_interrupts[] = { + u32 gt_interrupts[] = { GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | @@ -3949,8 +3949,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { - uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; - uint32_t de_pipe_enables; + u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; -- cgit v1.2.3 From cbe974fb964ec8f830512be0ff34b58242f321bc Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:25 +0200 Subject: drm/i915/cdclk: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/b56d250007a5d85d15038962548abb3e1818480a.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_cdclk.c | 40 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 73cb7250118e..15ba950dee00 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) }; const unsigned int *vco_table; unsigned int vco; - uint8_t tmp = 0; + u8 tmp = 0; /* FIXME other chipsets? */ if (IS_GM45(dev_priv)) @@ -249,13 +249,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { struct pci_dev *pdev = dev_priv->drm.pdev; - static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; - static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; - static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; - static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; - const uint8_t *div_table; + static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; + static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; + static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; + static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 }; + const u8 *div_table; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -330,12 +330,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { struct pci_dev *pdev = dev_priv->drm.pdev; - static const uint8_t div_3200[] = { 16, 10, 8 }; - static const uint8_t div_4000[] = { 20, 12, 10 }; - static const uint8_t div_5333[] = { 24, 16, 14 }; - const uint8_t *div_table; + static const u8 div_3200[] = { 16, 10, 8 }; + static const u8 div_4000[] = { 20, 12, 10 }; + static const u8 div_5333[] = { 24, 16, 14 }; + const u8 *div_table; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -375,7 +375,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, { struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int cdclk_sel; - uint16_t tmp = 0; + u16 tmp = 0; cdclk_state->vco = intel_hpll_vco(dev_priv); @@ -403,8 +403,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv, static void hsw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { - uint32_t lcpll = I915_READ(LCPLL_CTL); - uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; + u32 lcpll = I915_READ(LCPLL_CTL); + u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_state->cdclk = 800000; @@ -672,8 +672,8 @@ static u8 bdw_calc_voltage_level(int cdclk) static void bdw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { - uint32_t lcpll = I915_READ(LCPLL_CTL); - uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; + u32 lcpll = I915_READ(LCPLL_CTL); + u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_state->cdclk = 800000; @@ -700,7 +700,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_state *cdclk_state) { int cdclk = cdclk_state->cdclk; - uint32_t val; + u32 val; int ret; if (WARN((I915_READ(LCPLL_CTL) & @@ -1083,7 
+1083,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t cdctl, expected; + u32 cdctl, expected; /* * check if the pre-os initialized the display @@ -2690,7 +2690,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv) static int g4x_hrawclk(struct drm_i915_private *dev_priv) { - uint32_t clkcfg; + u32 clkcfg; /* hrawclock is 1/4 the FSB frequency */ clkcfg = I915_READ(CLKCFG); -- cgit v1.2.3 From 990290d124d5556fedcaae98f0b2f456fc009f4b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:26 +0200 Subject: drm/i915/dpll_mgr: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch/whitespace fixes sprinkled on top of the changed lines. Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/b73aefabb757acf59896bd77dbb20c2e343c6e6d.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_dpll_mgr.c | 145 +++++++++++++++++----------------- drivers/gpu/drm/i915/intel_dpll_mgr.h | 53 ++++++------- 2 files changed, 99 insertions(+), 99 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 04870e960537..606f54dde086 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -346,7 +346,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; - uint32_t val; + u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS); @@ -490,7 +490,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; val = I915_READ(WRPLL_CTL(id)); I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); @@ -500,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t val; + u32 val; val = I915_READ(SPLL_CTL); I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); @@ -513,7 +513,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; - uint32_t val; + u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS); @@ -533,7 +533,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state) { intel_wakeref_t wakeref; - uint32_t val; + u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS); @@ -639,11 +639,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock) return budget; } -static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, - unsigned r2, unsigned n2, unsigned p, +static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget, + unsigned int r2, unsigned int n2, + unsigned int p, struct hsw_wrpll_rnp *best) { - uint64_t a, b, c, d, diff, diff_best; + u64 a, b, c, d, diff, diff_best; /* No best (r,n,p) yet */ if (best->p == 0) { @@ -702,7 +703,7 @@ static void hsw_ddi_calculate_wrpll(int clock /* in Hz */, unsigned *r2_out, unsigned 
*n2_out, unsigned *p_out) { - uint64_t freq2k; + u64 freq2k; unsigned p, n2, r2; struct hsw_wrpll_rnp best = { 0, 0, 0 }; unsigned budget; @@ -768,7 +769,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, struct intel_crtc_state *crtc_state) { struct intel_shared_dpll *pll; - uint32_t val; + u32 val; unsigned int p, n2, r2; hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); @@ -930,7 +931,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; val = I915_READ(DPLL_CTRL1); @@ -995,7 +996,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - uint32_t val; + u32 val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; @@ -1035,7 +1036,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; - uint32_t val; + u32 val; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, @@ -1062,9 +1063,9 @@ out: } struct skl_wrpll_context { - uint64_t min_deviation; /* current minimal deviation */ - uint64_t central_freq; /* chosen central freq */ - uint64_t dco_freq; /* chosen dco freq */ + u64 min_deviation; /* current minimal deviation */ + u64 central_freq; /* chosen central freq */ + u64 dco_freq; /* chosen dco freq */ unsigned int p; /* chosen divider */ }; @@ -1080,11 +1081,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) #define SKL_DCO_MAX_NDEVIATION 600 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, - uint64_t central_freq, - uint64_t dco_freq, + u64 central_freq, + u64 dco_freq, unsigned int divider) { - uint64_t deviation; + u64 deviation; deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), central_freq); @@ -1158,21 +1159,21 @@ static void skl_wrpll_get_multipliers(unsigned int p, } struct skl_wrpll_params { - uint32_t dco_fraction; - uint32_t dco_integer; - uint32_t qdiv_ratio; - uint32_t qdiv_mode; - uint32_t kdiv; - uint32_t pdiv; - uint32_t central_freq; + u32 dco_fraction; + u32 dco_integer; + u32 qdiv_ratio; + u32 qdiv_mode; + u32 kdiv; + u32 pdiv; + u32 central_freq; }; static void skl_wrpll_params_populate(struct skl_wrpll_params *params, - uint64_t afe_clock, - uint64_t central_freq, - uint32_t p0, uint32_t p1, uint32_t p2) + u64 afe_clock, + u64 central_freq, + u32 p0, u32 p1, u32 p2) { - uint64_t dco_freq; + u64 dco_freq; switch (central_freq) { case 9600000000ULL: @@ -1238,10 +1239,10 @@ static bool skl_ddi_calculate_wrpll(int clock /* in Hz */, struct skl_wrpll_params *wrpll_params) { - uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ - uint64_t dco_central_freq[3] = {8400000000ULL, - 9000000000ULL, - 9600000000ULL}; + u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ + u64 dco_central_freq[3] = { 8400000000ULL, + 9000000000ULL, + 9600000000ULL }; static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30, 32, 36, 40, 42, 44, 48, 52, 54, 56, 60, 64, 66, 68, @@ -1265,7 +1266,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */, for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { for (i = 0; i < dividers[d].n_dividers; i++) { unsigned int p = dividers[d].list[i]; - uint64_t dco_freq = p * afe_clock; + u64 dco_freq = p * 
afe_clock; skl_wrpll_try_divider(&ctx, dco_central_freq[dco], @@ -1311,7 +1312,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int clock) { - uint32_t ctrl1, cfgcr1, cfgcr2; + u32 ctrl1, cfgcr1, cfgcr2; struct skl_wrpll_params wrpll_params = { 0, }; /* @@ -1348,7 +1349,7 @@ static bool skl_ddi_dp_set_dpll_hw_state(int clock, struct intel_dpll_hw_state *dpll_hw_state) { - uint32_t ctrl1; + u32 ctrl1; /* * See comment in intel_dpll_hw_state to understand why we always use 0 @@ -1450,7 +1451,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t temp; + u32 temp; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; @@ -1571,7 +1572,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - uint32_t temp; + u32 temp; temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); temp &= ~PORT_PLL_ENABLE; @@ -1597,7 +1598,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; enum dpio_phy phy; enum dpio_channel ch; - uint32_t val; + u32 val; bool ret; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); @@ -1669,12 +1670,12 @@ out: /* bxt clock parameters */ struct bxt_clk_div { int clock; - uint32_t p1; - uint32_t p2; - uint32_t m2_int; - uint32_t m2_frac; + u32 p1; + u32 p2; + u32 m2_int; + u32 m2_frac; bool m2_frac_en; - uint32_t n; + u32 n; int vco; }; @@ -1741,8 +1742,8 @@ static bool bxt_ddi_set_dpll_hw_state(int clock, struct intel_dpll_hw_state *dpll_hw_state) { int vco = clk_div->vco; - uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; - uint32_t lanestagger; + u32 prop_coef, int_coef, gain_ctl, targ_cnt; + u32 lanestagger; if (vco >= 6200000 && vco <= 6700000) { prop_coef = 4; @@ -1891,7 +1892,7 @@ static void intel_ddi_pll_init(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); if (INTEL_GEN(dev_priv) < 9) { - uint32_t val = I915_READ(LCPLL_CTL); + u32 val = I915_READ(LCPLL_CTL); /* * The LCPLL register should be turned on by the BIOS. For now @@ -1977,7 +1978,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; /* 1. Enable DPLL power in DPLL_ENABLE. */ val = I915_READ(CNL_DPLL_ENABLE(id)); @@ -2052,7 +2053,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - uint32_t val; + u32 val; /* * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI. 
@@ -2110,7 +2111,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; - uint32_t val; + u32 val; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, @@ -2246,7 +2247,7 @@ cnl_ddi_calculate_wrpll(int clock, struct skl_wrpll_params *wrpll_params) { u32 afe_clock = clock * 5; - uint32_t ref_clock; + u32 ref_clock; u32 dco_min = 7998000; u32 dco_max = 10000000; u32 dco_mid = (dco_min + dco_max) / 2; @@ -2292,7 +2293,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, int clock) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t cfgcr0, cfgcr1; + u32 cfgcr0, cfgcr1; struct skl_wrpll_params wrpll_params = { 0, }; cfgcr0 = DPLL_CFGCR0_HDMI_MODE; @@ -2321,7 +2322,7 @@ static bool cnl_ddi_dp_set_dpll_hw_state(int clock, struct intel_dpll_hw_state *dpll_hw_state) { - uint32_t cfgcr0; + u32 cfgcr0; cfgcr0 = DPLL_CFGCR0_SSC_ENABLE; @@ -2538,7 +2539,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - uint32_t cfgcr0, cfgcr1; + u32 cfgcr0, cfgcr1; struct skl_wrpll_params pll_params = { 0 }; bool ret; @@ -2568,10 +2569,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, } int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - uint32_t pll_id) + u32 pll_id) { - uint32_t cfgcr0, cfgcr1; - uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction; + u32 cfgcr0, cfgcr1; + u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction; const struct skl_wrpll_params *params; int index, n_entries, link_clock; @@ -2654,10 +2655,10 @@ bool intel_dpll_is_combophy(enum intel_dpll_id id) } static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, - uint32_t *target_dco_khz, + u32 *target_dco_khz, struct intel_dpll_hw_state *state) { - uint32_t dco_min_freq, dco_max_freq; + u32 dco_min_freq, dco_max_freq; int div1_vals[] = {7, 5, 3, 2}; unsigned int i; int div2; @@ -2733,12 +2734,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int refclk_khz = dev_priv->cdclk.hw.ref; - uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; - uint32_t iref_ndiv, iref_trim, iref_pulse_w; - uint32_t prop_coeff, int_coeff; - uint32_t tdc_targetcnt, feedfwgain; - uint64_t ssc_stepsize, ssc_steplen, ssc_steplog; - uint64_t tmp; + u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; + u32 iref_ndiv, iref_trim, iref_pulse_w; + u32 prop_coeff, int_coeff; + u32 tdc_targetcnt, feedfwgain; + u64 ssc_stepsize, ssc_steplen, ssc_steplog; + u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); @@ -2761,7 +2762,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } m2div_rem = dco_khz % (refclk_khz * m1div); - tmp = (uint64_t)m2div_rem * (1 << 22); + tmp = (u64)m2div_rem * (1 << 22); do_div(tmp, refclk_khz * m1div); m2div_frac = tmp; @@ -2820,11 +2821,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } if (use_ssc) { - tmp = (uint64_t)dco_khz * 47 * 32; + tmp = (u64)dco_khz * 47 * 32; do_div(tmp, refclk_khz * m1div * 10000); ssc_stepsize = tmp; - tmp = (uint64_t)dco_khz * 1000; + tmp = (u64)dco_khz * 1000; ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32); } else { ssc_stepsize = 0; @@ -2974,7 +2975,7 @@ static bool 
icl_pll_get_hw_state(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; bool ret = false; enum port port; - uint32_t val; + u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS); @@ -3101,7 +3102,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); - uint32_t val; + u32 val; val = I915_READ(enable_reg); val |= PLL_POWER_ENABLE; @@ -3142,7 +3143,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); - uint32_t val; + u32 val; /* The first steps are done by intel_ddi_post_disable(). */ diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index a033d8f06d4a..e96e79413b54 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -138,14 +138,14 @@ enum intel_dpll_id { struct intel_dpll_hw_state { /* i9xx, pch plls */ - uint32_t dpll; - uint32_t dpll_md; - uint32_t fp0; - uint32_t fp1; + u32 dpll; + u32 dpll_md; + u32 fp0; + u32 fp1; /* hsw, bdw */ - uint32_t wrpll; - uint32_t spll; + u32 wrpll; + u32 spll; /* skl */ /* @@ -154,34 +154,33 @@ struct intel_dpll_hw_state { * the register. This allows us to easily compare the state to share * the DPLL. */ - uint32_t ctrl1; + u32 ctrl1; /* HDMI only, 0 when used for DP */ - uint32_t cfgcr1, cfgcr2; + u32 cfgcr1, cfgcr2; /* cnl */ - uint32_t cfgcr0; + u32 cfgcr0; /* CNL also uses cfgcr1 */ /* bxt */ - uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, - pcsdw12; + u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12; /* * ICL uses the following, already defined: - * uint32_t cfgcr0, cfgcr1; - */ - uint32_t mg_refclkin_ctl; - uint32_t mg_clktop2_coreclkctl1; - uint32_t mg_clktop2_hsclkctl; - uint32_t mg_pll_div0; - uint32_t mg_pll_div1; - uint32_t mg_pll_lf; - uint32_t mg_pll_frac_lock; - uint32_t mg_pll_ssc; - uint32_t mg_pll_bias; - uint32_t mg_pll_tdc_coldst_bias; - uint32_t mg_pll_bias_mask; - uint32_t mg_pll_tdc_coldst_bias_mask; + * u32 cfgcr0, cfgcr1; + */ + u32 mg_refclkin_ctl; + u32 mg_clktop2_coreclkctl1; + u32 mg_clktop2_hsclkctl; + u32 mg_pll_div0; + u32 mg_pll_div1; + u32 mg_pll_lf; + u32 mg_pll_frac_lock; + u32 mg_pll_ssc; + u32 mg_pll_bias; + u32 mg_pll_tdc_coldst_bias; + u32 mg_pll_bias_mask; + u32 mg_pll_tdc_coldst_bias_mask; }; /** @@ -280,7 +279,7 @@ struct dpll_info { * Inform the state checker that the DPLL is kept enabled even if * not in use by any CRTC. */ - uint32_t flags; + u32 flags; }; /** @@ -343,7 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - uint32_t pll_id); + u32 pll_id); int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); enum intel_dpll_id icl_port_to_mg_pll_id(enum port port); bool intel_dpll_is_combophy(enum intel_dpll_id id); -- cgit v1.2.3 From 830de4220a27735f8893830cbb84621f23b031b3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:27 +0200 Subject: drm/i915/dp: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch/whitespace fixes sprinkled on top of the changed lines. Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/3c030a12b4313eec512ce2b7a953cff439d8af67.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 142 +++++++++++++------------- drivers/gpu/drm/i915/intel_dp_link_training.c | 32 +++--- 2 files changed, 87 insertions(+), 87 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index df4292bb1a4f..808ccdae15b8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -429,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) } static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, - uint8_t lane_count) + u8 lane_count) { /* * FIXME: we need to synchronize the current link parameters with @@ -449,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int link_rate, - uint8_t lane_count) + u8 lane_count) { const struct drm_display_mode *fixed_mode = intel_dp->attached_connector->panel.fixed_mode; @@ -464,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, } int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count) + int link_rate, u8 lane_count) { int index; @@ -572,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_OK; } -uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) +u32 intel_dp_pack_aux(const u8 *src, int src_bytes) { - int i; - uint32_t v = 0; + int i; + u32 v = 0; if (src_bytes > 4) src_bytes = 4; for (i = 0; i < src_bytes; i++) - v |= ((uint32_t) src[i]) << ((3-i) * 8); + v |= ((u32)src[i]) << ((3 - i) * 8); return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -643,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); - uint32_t DP; + u32 DP; if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, "skipping pipe %c power sequencer kick due to port %c being active\n", @@ -1051,12 +1051,12 @@ intel_dp_check_edp(struct intel_dp *intel_dp) } } -static uint32_t +static u32 intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); - uint32_t status; + u32 status; bool done; #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) @@ -1069,7 +1069,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) return status; } -static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1083,7 +1083,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } -static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int 
index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -1102,7 +1102,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } -static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -1119,7 +1119,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return ilk_get_aux_clock_divider(intel_dp, index); } -static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { /* * SKL doesn't need us to program the AUX clock divider (Hardware will @@ -1129,14 +1129,14 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return index ? 0 : 1; } -static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, - int send_bytes, - uint32_t aux_clock_divider) +static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 aux_clock_divider) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - uint32_t precharge, timeout; + u32 precharge, timeout; if (IS_GEN(dev_priv, 6)) precharge = 3; @@ -1159,12 +1159,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); } -static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, - int send_bytes, - uint32_t unused) +static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 unused) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - uint32_t ret; + u32 ret; ret = DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | @@ -1184,19 +1184,19 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, static int intel_dp_aux_xfer(struct intel_dp *intel_dp, - const uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size, + const u8 *send, int send_bytes, + u8 *recv, int recv_size, u32 aux_send_ctl_flags) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); i915_reg_t ch_ctl, ch_data[5]; - uint32_t aux_clock_divider; + u32 aux_clock_divider; intel_wakeref_t wakeref; int i, ret, recv_bytes; int try, clock = 0; - uint32_t status; + u32 status; bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); @@ -1369,7 +1369,7 @@ static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); - uint8_t txbuf[20], rxbuf[20]; + u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; int ret; @@ -1702,7 +1702,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) } void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, - uint8_t *link_bw, uint8_t *rate_select) + u8 *link_bw, u8 *rate_select) { /* eDP 1.4 rate select method. 
*/ if (intel_dp->use_rate_select) { @@ -2217,7 +2217,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, } void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count, + int link_rate, u8 lane_count, bool link_mst) { intel_dp->link_trained = false; @@ -3177,20 +3177,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder, static void _intel_dp_set_link_train(struct intel_dp *intel_dp, - uint32_t *DP, - uint8_t dp_train_pat) + u32 *DP, + u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); if (dp_train_pat & train_pat_mask) DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { - uint32_t temp = I915_READ(DP_TP_CTL(port)); + u32 temp = I915_READ(DP_TP_CTL(port)); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -3289,7 +3289,7 @@ static void intel_enable_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - uint32_t dp_reg = I915_READ(intel_dp->output_reg); + u32 dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; @@ -3508,14 +3508,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder, * link status information */ bool -intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) { return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } /* These are source-specific values. 
*/ -uint8_t +u8 intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -3534,8 +3534,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } -uint8_t -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) +u8 +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; @@ -3580,12 +3580,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) } } -static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) +static u32 vlv_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3666,12 +3666,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) return 0; } -static uint32_t chv_signal_levels(struct intel_dp *intel_dp) +static u32 chv_signal_levels(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u32 deemph_reg_value, margin_reg_value; bool uniq_trans_scale = false; - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3749,10 +3749,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } -static uint32_t -g4x_signal_levels(uint8_t train_set) +static u32 +g4x_signal_levels(u8 train_set) { - uint32_t signal_levels = 0; + u32 signal_levels = 0; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: @@ -3788,8 +3788,8 @@ g4x_signal_levels(uint8_t train_set) } /* SNB CPU eDP voltage swing and pre-emphasis control */ -static uint32_t -snb_cpu_edp_signal_levels(uint8_t train_set) +static u32 +snb_cpu_edp_signal_levels(u8 train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3816,8 +3816,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set) } /* IVB CPU eDP voltage swing and pre-emphasis control */ -static uint32_t -ivb_cpu_edp_signal_levels(uint8_t train_set) +static u32 +ivb_cpu_edp_signal_levels(u8 train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3852,8 +3852,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint32_t signal_levels, mask = 0; - uint8_t train_set = intel_dp->train_set[0]; + u32 signal_levels, mask = 0; + u8 train_set = intel_dp->train_set[0]; if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { signal_levels = bxt_signal_levels(intel_dp); @@ -3892,7 +3892,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = @@ -3909,7 +3909,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port 
*intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; - uint32_t val; + u32 val; if (!HAS_DDI(dev_priv)) return; @@ -3944,7 +3944,7 @@ intel_dp_link_down(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); enum port port = encoder->port; - uint32_t DP = intel_dp->DP; + u32 DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev_priv))) return; @@ -4285,7 +4285,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } -u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, int mode_clock, int mode_hdisplay) { u16 bits_per_pixel, max_bpp_small_joiner_ram; @@ -4352,7 +4352,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(uint8_t, min_slice_count, + min_slice_count = min_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); @@ -4370,11 +4370,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } -static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) { int status = 0; int test_link_rate; - uint8_t test_lane_count, test_link_bw; + u8 test_lane_count, test_link_bw; /* (DP CTS 1.2) * 4.3.1.11 */ @@ -4407,10 +4407,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) return DP_TEST_ACK; } -static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) { - uint8_t test_pattern; - uint8_t test_misc; + u8 test_pattern; + u8 test_misc; __be16 h_width, v_height; int status = 0; @@ -4468,9 +4468,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) return DP_TEST_ACK; } -static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) { - uint8_t test_result = DP_TEST_ACK; + u8 test_result = DP_TEST_ACK; struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; @@ -4512,16 +4512,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) return test_result; } -static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { - uint8_t test_result = DP_TEST_NAK; + u8 test_result = DP_TEST_NAK; return test_result; } static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { - uint8_t response = DP_TEST_NAK; - uint8_t request = 0; + u8 response = DP_TEST_NAK; + u8 request = 0; int status; status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); @@ -4847,8 +4847,8 @@ static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); - uint8_t *dpcd = intel_dp->dpcd; - uint8_t type; + u8 *dpcd = intel_dp->dpcd; + u8 type; if (lspcon->active) lspcon_resume(lspcon); @@ -5630,7 +5630,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, .address = DP_AUX_HDCP_AKSV, .size = DRM_HDCP_KSV_LEN, }; - uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; + u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; ssize_t dpcd_ret; int ret; diff --git 
a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 30be0e39bd5f..b59c87daa4f7 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -24,7 +24,7 @@ #include "intel_drv.h" static void -intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) { DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x", @@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE]) static void intel_get_adjust_train(struct intel_dp *intel_dp, - const uint8_t link_status[DP_LINK_STATUS_SIZE]) + const u8 link_status[DP_LINK_STATUS_SIZE]) { - uint8_t v = 0; - uint8_t p = 0; + u8 v = 0; + u8 p = 0; int lane; - uint8_t voltage_max; - uint8_t preemph_max; + u8 voltage_max; + u8 preemph_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); if (this_v > v) v = this_v; @@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp, static bool intel_dp_set_link_train(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { - uint8_t buf[sizeof(intel_dp->train_set) + 1]; + u8 buf[sizeof(intel_dp->train_set) + 1]; int ret, len; intel_dp_program_link_training_pattern(intel_dp, dp_train_pat); @@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, - uint8_t dp_train_pat) + u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp); @@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { - uint8_t voltage; + u8 voltage; int voltage_tries, cr_tries, max_cr_tries; bool max_vswing_reached = false; - uint8_t link_config[2]; - uint8_t link_bw, rate_select; + u8 link_config[2]; + u8 link_bw, rate_select; if (intel_dp->prepare_link_retrain) intel_dp->prepare_link_retrain(intel_dp); @@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; + u8 link_status[DP_LINK_STATUS_SIZE]; drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); @@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) { int tries; u32 training_pattern; - uint8_t link_status[DP_LINK_STATUS_SIZE]; + u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; training_pattern = intel_dp_training_pattern(intel_dp); -- cgit v1.2.3 From c4aa2eca319c99a4dd21d10ebfd462a2612175f1 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 16 Jan 2019 11:15:28 +0200 Subject: drm/i915/sprite: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/63fe4b9727b55727190e50e57427f513d204f000.1547629303.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_sprite.c | 60 ++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 87a06fcca284..b02d3d9809e3 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -321,8 +321,8 @@ skl_program_scaler(struct intel_plane *plane, &crtc_state->scaler_state.scalers[scaler_id]; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); u16 y_hphase, uv_rgb_hphase; u16 y_vphase, uv_rgb_vphase; int hscale, vscale; @@ -477,10 +477,10 @@ skl_program_plane(struct intel_plane *plane, u32 aux_stride = skl_plane_stride(plane_state, 1); int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t x = plane_state->color_plane[color_plane].x; - uint32_t y = plane_state->color_plane[color_plane].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 x = plane_state->color_plane[color_plane].x; + u32 y = plane_state->color_plane[color_plane].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; struct intel_plane *linked = plane_state->linked_plane; const struct drm_framebuffer *fb = plane_state->base.fb; u8 alpha = plane_state->base.alpha >> 8; @@ -814,10 +814,10 @@ vlv_update_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; unsigned long irqflags; /* Sizes are 0 based */ @@ -976,12 +976,12 @@ ivb_update_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; /* Sizes are 0 based */ @@ -1152,12 +1152,12 @@ 
g4x_update_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 crtc_w = drm_rect_width(&plane_state->base.dst); + u32 crtc_h = drm_rect_height(&plane_state->base.dst); + u32 x = plane_state->color_plane[0].x; + u32 y = plane_state->color_plane[0].y; + u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; /* Sizes are 0 based */ @@ -1706,7 +1706,7 @@ out: return ret; } -static const uint32_t g4x_plane_formats[] = { +static const u32 g4x_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -1714,13 +1714,13 @@ static const uint32_t g4x_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint64_t i9xx_plane_format_modifiers[] = { +static const u64 i9xx_plane_format_modifiers[] = { I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; -static const uint32_t snb_plane_formats[] = { +static const u32 snb_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, @@ -1729,7 +1729,7 @@ static const uint32_t snb_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t vlv_plane_formats[] = { +static const u32 vlv_plane_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, @@ -1743,7 +1743,7 @@ static const uint32_t vlv_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t skl_plane_formats[] = { +static const u32 skl_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1758,7 +1758,7 @@ static const uint32_t skl_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t skl_planar_formats[] = { +static const u32 skl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1774,7 +1774,7 @@ static const uint32_t skl_planar_formats[] = { DRM_FORMAT_NV12, }; -static const uint64_t skl_plane_format_modifiers_noccs[] = { +static const u64 skl_plane_format_modifiers_noccs[] = { I915_FORMAT_MOD_Yf_TILED, I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, @@ -1782,7 +1782,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = { DRM_FORMAT_MOD_INVALID }; -static const uint64_t skl_plane_format_modifiers_ccs[] = { +static const u64 skl_plane_format_modifiers_ccs[] = { I915_FORMAT_MOD_Yf_TILED_CCS, I915_FORMAT_MOD_Y_TILED_CCS, I915_FORMAT_MOD_Yf_TILED, -- cgit v1.2.3 From 1b4bd5c4a663663e8dd4b7b1c5f8565626eb068b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 16 Jan 2019 15:54:21 +0000 Subject: drm/i915: Limit the for_each_set_bit() to the valid range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let static analyzers (smatch) know that we are not going to wander off the end of the array by providing a tight upper bound: drivers/gpu/drm/i915/intel_display.c:9532 hsw_get_transcoder_state() error: buffer overflow 'dev_priv->__info.trans_offsets' 6 <= 31 References: 0716931a82b4 ("drm/i915/icl: fix transcoder state readout") Signed-off-by: Chris Wilson Cc: Jani Nikula Cc: Ville Syrjala Cc: Imre Deak Cc: Madhav Chauhan Reviewed-by: Ville Syrjälä 
Link: https://patchwork.freedesktop.org/patch/msgid/20190116155421.7660-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8d6d7ae311f4..9a6fbce1cafc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9526,7 +9526,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). */ - for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) { + for_each_set_bit(panel_transcoder, + &panel_transcoder_mask, + ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { enum pipe trans_pipe; tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); -- cgit v1.2.3 From 6ddbb12e3f54e491abd1660d34da255e462b0aa2 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 17 Jan 2019 14:48:31 +0000 Subject: drm/i915: Fix wakeref cookie handling in debugfs/i915_forcewake_user To avoid a false positive of a leaked wakeref, we can store the cookie in file->private_data and use it in intel_runtime_pm_put. Signed-off-by: Tvrtko Ursulin Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190117144831.13156-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index cdf7730a79df..0f100fd5ff2c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4413,7 +4413,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - intel_runtime_pm_get(i915); + file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); intel_uncore_forcewake_user_get(i915); return 0; @@ -4427,7 +4427,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) return 0; intel_uncore_forcewake_user_put(i915); - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put(i915, + (intel_wakeref_t)(uintptr_t)file->private_data); return 0; } -- cgit v1.2.3 From 1dfbea041ffd2293634b1a77650b195e58e7487a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 17 Jan 2019 23:31:26 +0000 Subject: drm/i915/breadcrumbs: Drop assertion that we've already enabled irqs The motivation for introducing the check that we only enable breadcrumb irqs if the device's irq was installed was once upon a time we waited during suspend after disabling interrupts (which was quite slow until the bug was discovered). Since then we have the notion of pinning the breadcrumb irq, broadening it from the sole purpose of user interrupt notification and waiting, and more importantly decoupling it from a very defined time period during which enabling the irq was expected. So stop insisting the irq is installed before we setup our IMR masks, if the IER isn't yet enabled, nothing will happen and we will timeout instead, revealing the lack of irq in the hang debug messages. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190117233126.30165-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_breadcrumbs.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 4ed7105d7ff5..bfbff04c16aa 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -158,30 +158,24 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t) static void irq_enable(struct intel_engine_cs *engine) { - /* - * FIXME: Ideally we want this on the API boundary, but for the - * sake of testing with mock breadcrumbs (no HW so unable to - * enable irqs) we place it deep within the bowels, at the point - * of no return. - */ - GEM_BUG_ON(!intel_irqs_enabled(engine->i915)); + if (!engine->irq_enable) + return; /* Caller disables interrupts */ - if (engine->irq_enable) { - spin_lock(&engine->i915->irq_lock); - engine->irq_enable(engine); - spin_unlock(&engine->i915->irq_lock); - } + spin_lock(&engine->i915->irq_lock); + engine->irq_enable(engine); + spin_unlock(&engine->i915->irq_lock); } static void irq_disable(struct intel_engine_cs *engine) { + if (!engine->irq_disable) + return; + /* Caller disables interrupts */ - if (engine->irq_disable) { - spin_lock(&engine->i915->irq_lock); - engine->irq_disable(engine); - spin_unlock(&engine->i915->irq_lock); - } + spin_lock(&engine->i915->irq_lock); + engine->irq_disable(engine); + spin_unlock(&engine->i915->irq_lock); } void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) -- cgit v1.2.3 From 8d714185951b04368f6695a8c01f5c9c332ac1d3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 17 Jan 2019 23:05:12 +0000 Subject: drm/i915/selftests: Query the vm under test for hugepage support Since we have the ppgtt we want to test, we can ask it directly if it is suitable for the hugepage test we intend to undertake. v2: Not everyone has full-ppgtt Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190117230512.4789-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/huge_pages.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index a52450111802..a9a2fa35876f 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1449,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!HAS_FULL_48BIT_PPGTT(dev_priv)) { + if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } -- cgit v1.2.3 From 293f8c0f2bb4409705e4cbc7bc1b660c024e9be9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 18 Jan 2019 11:22:25 +0000 Subject: drm/i915: Use b->irq_enable() as predicate for mock engine Since commit d4ccceb05591 ("drm/i915/icl: Ringbuffer interrupt handling") we have required a mechanism to avoid touching the interrupt hardware for breadcrumbs, superseding our mock interface for selftests. The residual problem (ideas welcome) is in probing the mock ring registers for ring_is_idle. Hmm, maybe we should just install mock handlers for i915->uncore.mmio__write and friends? 
The only problem is that we would need to truly mock some expected reads. :( References: d4ccceb05591 ("drm/i915/icl: Ringbuffer interrupt handling") Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190118112225.13780-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_breadcrumbs.c | 17 ++++------------- drivers/gpu/drm/i915/intel_engine_cs.c | 11 ++++------- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 - drivers/gpu/drm/i915/selftests/mock_engine.c | 1 - 4 files changed, 8 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index bfbff04c16aa..4fad93fe3678 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -287,25 +287,16 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) if (b->irq_armed) return false; - /* The breadcrumb irq will be disarmed on the interrupt after the + /* + * The breadcrumb irq will be disarmed on the interrupt after the * waiters are signaled. This gives us a single interrupt window in * which we can add a new waiter and avoid the cost of re-enabling * the irq. */ b->irq_armed = true; - if (I915_SELFTEST_ONLY(b->mock)) { - /* For our mock objects we want to avoid interaction - * with the real hardware (which is not set up). So - * we simply pretend we have enabled the powerwell - * and the irq, and leave it up to the mock - * implementation to call intel_engine_wakeup() - * itself when it wants to simulate a user interrupt, - */ - return true; - } - - /* Since we are waiting on a request, the GPU should be busy + /* + * Since we are waiting on a request, the GPU should be busy * and should have its own rpm reference. This is tracked * by i915->gt.awake, we can forgo holding our own wakref * for the interrupt as before i915->gt.awake is released (when diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index cc6379422bc0..639bcd4cf3e9 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -917,6 +917,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine) intel_wakeref_t wakeref; bool idle = true; + if (I915_SELFTEST_ONLY(!engine->mmio_base)) + return true; + /* If the whole device is asleep, the engine must be idle */ wakeref = intel_runtime_pm_get_if_in_use(dev_priv); if (!wakeref) @@ -955,9 +958,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (!intel_engine_signaled(engine, intel_engine_last_submit(engine))) return false; - if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) - return true; - /* Waiting to drain ELSP? */ if (READ_ONCE(engine->execlists.active)) { struct tasklet_struct *t = &engine->execlists.tasklet; @@ -983,10 +983,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return false; /* Ring stopped?
*/ - if (!ring_is_idle(engine)) - return false; - - return true; + return ring_is_idle(engine); } bool intel_engines_are_idle(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 616f6bbb18ad..c3ef0f9bf321 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -398,7 +398,6 @@ struct intel_engine_cs { unsigned int irq_count; bool irq_armed : 1; - I915_SELFTEST_DECLARE(bool mock : 1); } breadcrumbs; struct { diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 50e1a0b1af7e..9fe5b2c8f8d4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -201,7 +201,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); intel_engine_init_breadcrumbs(&engine->base); - engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ /* fake hw queue */ spin_lock_init(&engine->hw_lock); -- cgit v1.2.3 From 71fc448c1aaf896f7859c46a7c0c33fbac411455 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 18 Jan 2019 11:36:32 +0000 Subject: drm/i915/selftests: Make evict tolerant of foreign objects The evict selftests presumed that all objects in use had been allocated by itself. This is a dubious claim and so instead of asserting complete control over the object lists, take (temporary) ownership of them instead. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190118113632.7056-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 64 ++++++++++++++++++++----- 1 file changed, 53 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 067e5dfa0a24..543d618c152b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -31,30 +31,63 @@ static int populate_ggtt(struct drm_i915_private *i915) { - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj, *on; + unsigned long expected_unbound, expected_bound; + unsigned long unbound, bound, count; u64 size; + int err; + + expected_unbound = 0; + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { + i915_gem_object_get(obj); + expected_unbound++; + } + + expected_bound = 0; + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { + i915_gem_object_get(obj); + expected_bound++; + } + count = 0; for (size = 0; size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; size += I915_GTT_PAGE_SIZE) { struct i915_vma *vma; obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto cleanup; + } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto cleanup; + } + + count++; } - if (!list_empty(&i915->mm.unbound_list)) { - size = 0; - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - size++; + unbound = 0; + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) + unbound++; + if (unbound != expected_unbound) { + pr_err("%s: Found %lu objects unbound, expected %lu!\n", + __func__, unbound, expected_unbound); + err = -EINVAL; + goto cleanup; + } - pr_err("Found %lld objects 
unbound!\n", size); - return -EINVAL; + bound = 0; + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) + bound++; + if (bound != expected_bound + count) { + pr_err("%s: Found %lu objects bound, expected %lu!\n", + __func__, bound, expected_bound + count); + err = -EINVAL; + goto cleanup; } if (list_empty(&i915->ggtt.vm.inactive_list)) { @@ -63,6 +96,15 @@ static int populate_ggtt(struct drm_i915_private *i915) } return 0; + +cleanup: + list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link) + i915_gem_object_put(obj); + + list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link) + i915_gem_object_put(obj); + + return err; } static void unpin_ggtt(struct drm_i915_private *i915) -- cgit v1.2.3 From bfb0a2cb2b2d8ff3620b8cd21fdaf00c28998e19 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 17 Jan 2019 14:14:00 +0200 Subject: drm/i915/dp: remove PANEL_POWER_OFF macro and its use It's superfluous. Reviewed-by: Mika Kuoppala Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/7987938a7950853ac3ee43c82fb9cbb0cd59a2fa.1547726792.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 1 - drivers/gpu/drm/i915/intel_dp.c | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9a1340cfda6c..93cbd057c07a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4687,7 +4687,6 @@ enum { #define EDP_FORCE_VDD (1 << 3) #define EDP_BLC_ENABLE (1 << 2) #define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_OFF (0 << 0) #define PANEL_POWER_ON (1 << 0) #define _PP_ON_DELAYS 0x61208 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 808ccdae15b8..f7d5314e3395 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1000,8 +1000,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, /* 0x1F write to PP_DIV_REG sets max cycle delay */ I915_WRITE(pp_div_reg, pp_div | 0x1F); - I915_WRITE(pp_ctrl_reg, - PANEL_UNLOCK_REGS | PANEL_POWER_OFF); + I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS); msleep(intel_dp->panel_power_cycle_delay); } } -- cgit v1.2.3 From 209760b7f6eefce3a35cd5e134bd59155ec98888 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 18 Jan 2019 19:08:05 +0000 Subject: drm/i915/selftests: Allocate mock ring/timeline per context To correctly simulate preemption between contexts, we need independent timelines along each context. Make it so. 
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190118190805.11792-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/mock_engine.c | 90 +++++++++++++++------------- 1 file changed, 47 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 9fe5b2c8f8d4..8b8d51af7d6a 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -30,6 +30,36 @@ struct mock_ring { struct i915_timeline timeline; }; +static struct intel_ring *mock_ring(struct intel_engine_cs *engine) +{ + const unsigned long sz = PAGE_SIZE / 2; + struct mock_ring *ring; + + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); + if (!ring) + return NULL; + + i915_timeline_init(engine->i915, &ring->timeline, engine->name); + + ring->base.size = sz; + ring->base.effective_size = sz; + ring->base.vaddr = (void *)(ring + 1); + ring->base.timeline = &ring->timeline; + + INIT_LIST_HEAD(&ring->base.request_list); + intel_ring_update_space(&ring->base); + + return &ring->base; +} + +static void mock_ring_free(struct intel_ring *base) +{ + struct mock_ring *ring = container_of(base, typeof(*ring), base); + + i915_timeline_fini(&ring->timeline); + kfree(ring); +} + static struct mock_request *first_request(struct mock_engine *engine) { return list_first_entry_or_null(&engine->hw_queue, @@ -80,6 +110,9 @@ static void mock_context_unpin(struct intel_context *ce) static void mock_context_destroy(struct intel_context *ce) { GEM_BUG_ON(ce->pin_count); + + if (ce->ring) + mock_ring_free(ce->ring); } static const struct intel_context_ops mock_context_ops = { @@ -93,13 +126,22 @@ mock_context_pin(struct intel_engine_cs *engine, { struct intel_context *ce = to_intel_context(ctx, engine); - if (!ce->pin_count++) { - i915_gem_context_get(ctx); - ce->ring = engine->buffer; - ce->ops = &mock_context_ops; + if (ce->pin_count++) + return ce; + + if (!ce->ring) { + ce->ring = mock_ring(engine); + if (!ce->ring) + goto err; } + ce->ops = &mock_context_ops; + i915_gem_context_get(ctx); return ce; + +err: + ce->pin_count = 0; + return ERR_PTR(-ENOMEM); } static int mock_request_alloc(struct i915_request *request) @@ -143,36 +185,6 @@ static void mock_submit_request(struct i915_request *request) spin_unlock_irq(&engine->hw_lock); } -static struct intel_ring *mock_ring(struct intel_engine_cs *engine) -{ - const unsigned long sz = PAGE_SIZE / 2; - struct mock_ring *ring; - - ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); - if (!ring) - return NULL; - - i915_timeline_init(engine->i915, &ring->timeline, engine->name); - - ring->base.size = sz; - ring->base.effective_size = sz; - ring->base.vaddr = (void *)(ring + 1); - ring->base.timeline = &ring->timeline; - - INIT_LIST_HEAD(&ring->base.request_list); - intel_ring_update_space(&ring->base); - - return &ring->base; -} - -static void mock_ring_free(struct intel_ring *base) -{ - struct mock_ring *ring = container_of(base, typeof(*ring), base); - - i915_timeline_fini(&ring->timeline); - kfree(ring); -} - struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, const char *name, int id) @@ -207,17 +219,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, timer_setup(&engine->hw_delay, hw_delay_complete, 0); INIT_LIST_HEAD(&engine->hw_queue); - engine->base.buffer = mock_ring(&engine->base); - if (!engine->base.buffer) - goto err_breadcrumbs; - if 
(IS_ERR(intel_context_pin(i915->kernel_context, &engine->base))) - goto err_ring; + goto err_breadcrumbs; return &engine->base; -err_ring: - mock_ring_free(engine->base.buffer); err_breadcrumbs: intel_engine_fini_breadcrumbs(&engine->base); i915_timeline_fini(&engine->base.timeline); @@ -260,8 +266,6 @@ void mock_engine_free(struct intel_engine_cs *engine) __intel_context_unpin(engine->i915->kernel_context, engine); - mock_ring_free(engine->buffer); - intel_engine_fini_breadcrumbs(engine); i915_timeline_fini(&engine->timeline); -- cgit v1.2.3 From 129fe7516b233892eb6ded103b10c61d9a0a52cb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:19 +0200 Subject: drm/i915/color: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_color.c | 40 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 37fd9ddf762e..299eb7858adc 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -142,7 +142,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, pipe = crtc->pipe; - uint16_t coeffs[9] = { 0, }; + u16 coeffs[9] = { 0, }; bool limited_color_range = false; /* @@ -171,7 +171,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state) * hardware. */ for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i]; + u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; /* * Clamp input value to min/max supported by @@ -233,7 +233,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state) I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); if (INTEL_GEN(dev_priv) > 6) { - uint16_t postoff = 0; + u16 postoff = 0; if (limited_color_range) postoff = (16 * (1 << 12) / 255) & 0x1fff; @@ -244,7 +244,7 @@ static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state) I915_WRITE(PIPE_CSC_MODE(pipe), 0); } else { - uint32_t mode = CSC_MODE_YUV_TO_RGB; + u32 mode = CSC_MODE_YUV_TO_RGB; if (limited_color_range) mode |= CSC_BLACK_SCREEN_OFFSET; @@ -261,15 +261,15 @@ static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state) struct drm_device *dev = crtc_state->base.crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - uint32_t mode; + u32 mode; if (crtc_state->base.ctm) { struct drm_color_ctm *ctm = crtc_state->base.ctm->data; - uint16_t coeffs[9] = { 0, }; + u16 coeffs[9] = { 0, }; int i; for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - uint64_t abs_coeff = + u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; /* Round coefficient. 
*/ @@ -331,7 +331,7 @@ static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state, if (blob) { struct drm_color_lut *lut = blob->data; for (i = 0; i < 256; i++) { - uint32_t word = + u32 word = (drm_color_lut_extract(lut[i].red, 8) << 16) | (drm_color_lut_extract(lut[i].green, 8) << 8) | drm_color_lut_extract(lut[i].blue, 8); @@ -343,7 +343,7 @@ static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state, } } else { for (i = 0; i < 256; i++) { - uint32_t word = (i << 16) | (i << 8) | i; + u32 word = (i << 16) | (i << 8) | i; if (HAS_GMCH_DISPLAY(dev_priv)) I915_WRITE(PALETTE(pipe, i), word); @@ -388,7 +388,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT); @@ -397,7 +397,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state) struct drm_color_lut *lut = crtc_state->base.degamma_lut->data; for (i = 0; i < lut_size; i++) { - uint32_t word = + u32 word = drm_color_lut_extract(lut[i].red, 10) << 20 | drm_color_lut_extract(lut[i].green, 10) << 10 | drm_color_lut_extract(lut[i].blue, 10); @@ -406,7 +406,7 @@ static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state) } } else { for (i = 0; i < lut_size; i++) { - uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1); I915_WRITE(PREC_PAL_DATA(pipe), (v << 20) | (v << 10) | v); @@ -418,7 +418,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK); @@ -431,7 +431,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset) struct drm_color_lut *lut = crtc_state->base.gamma_lut->data; for (i = 0; i < lut_size; i++) { - uint32_t word = + u32 word = (drm_color_lut_extract(lut[i].red, 10) << 20) | (drm_color_lut_extract(lut[i].green, 10) << 10) | drm_color_lut_extract(lut[i].blue, 10); @@ -449,7 +449,7 @@ static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset) drm_color_lut_extract(lut[i].blue, 16)); } else { for (i = 0; i < lut_size; i++) { - uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1); I915_WRITE(PREC_PAL_DATA(pipe), (v << 20) | (v << 10) | v); @@ -491,8 +491,8 @@ static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - const uint32_t lut_size = 33; - uint32_t i; + const u32 lut_size = 33; + u32 i; /* * When setting the auto-increment bit, the hardware seems to @@ -507,7 +507,7 @@ static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state) * different values per channel, so this just loads a linear table. 
*/ for (i = 0; i < lut_size; i++) { - uint32_t v = (i * (1 << 16)) / (lut_size - 1); + u32 v = (i * (1 << 16)) / (lut_size - 1); I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v); } @@ -544,8 +544,8 @@ static void cherryview_load_luts(struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; struct drm_color_lut *lut; - uint32_t i, lut_size; - uint32_t word0, word1; + u32 i, lut_size; + u32 word0, word1; if (crtc_state_is_legacy_gamma(crtc_state)) { /* Turn off degamma/gamma on CGM block. */ -- cgit v1.2.3 From 5ce9a6497914b21ca995ebbc0322b287b28176e8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:20 +0200 Subject: drm/i915/pm: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch fixes sprinkled on top of the changed lines. Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 213 ++++++++++++++++++++-------------------- 1 file changed, 105 insertions(+), 108 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7613ae72df3d..8b63afa3a221 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -480,7 +480,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) int sprite0_start, sprite1_start; switch (pipe) { - uint32_t dsparb, dsparb2, dsparb3; + u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ(DSPARB); dsparb2 = I915_READ(DSPARB2); @@ -513,7 +513,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x7f; @@ -529,7 +529,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, static int i830_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x1ff; @@ -546,7 +546,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, static int i845_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - uint32_t dsparb = I915_READ(DSPARB); + u32 dsparb = I915_READ(DSPARB); int size; size = dsparb & 0x7f; @@ -667,9 +667,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate, unsigned int cpp, unsigned int latency) { - uint64_t ret; + u64 ret; - ret = (uint64_t) pixel_rate * cpp * latency; + ret = (u64)pixel_rate * cpp * latency; ret = DIV_ROUND_UP_ULL(ret, 10000); return ret; @@ -1089,9 +1089,9 @@ static int g4x_fbc_fifo_size(int level) } } -static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) +static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1188,9 +1188,9 @@ static bool 
g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, return dirty; } -static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t pri_val); +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 pri_val); static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -1598,9 +1598,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) +static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1968,7 +1968,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, spin_lock(&dev_priv->uncore.lock); switch (crtc->pipe) { - uint32_t dsparb, dsparb2, dsparb3; + u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ_FW(DSPARB); dsparb2 = I915_READ_FW(DSPARB2); @@ -2262,8 +2262,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) { struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); const struct intel_watermark_params *wm_info; - uint32_t fwater_lo; - uint32_t fwater_hi; + u32 fwater_lo; + u32 fwater_hi; int cwm, srwm = 1; int fifo_size; int planea_wm, planeb_wm; @@ -2406,7 +2406,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; const struct drm_display_mode *adjusted_mode; - uint32_t fwater_lo; + u32 fwater_lo; int planea_wm; crtc = single_enabled_crtc(dev_priv); @@ -2455,8 +2455,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate, return ret; } -static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, - uint8_t cpp) +static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) { /* * Neither of these should be possible since this function shouldn't be @@ -2473,22 +2472,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, } struct ilk_wm_maximums { - uint16_t pri; - uint16_t spr; - uint16_t cur; - uint16_t fbc; + u16 pri; + u16 spr; + u16 cur; + u16 fbc; }; /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value, - bool is_lp) +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value, bool is_lp) { - uint32_t method1, method2; + u32 method1, method2; int cpp; if (mem_value == 0) @@ -2516,11 +2514,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value) +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value) { - uint32_t method1, method2; + u32 method1, method2; int cpp; if (mem_value == 0) @@ -2543,9 +2541,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. 
* mem_value must be in 0.1us units. */ -static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t mem_value) +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 mem_value) { int cpp; @@ -2563,9 +2561,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, } /* Only for WM_LP. */ -static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, - uint32_t pri_val) +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + u32 pri_val) { int cpp; @@ -2731,9 +2729,9 @@ static bool ilk_validate_wm_level(int level, DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", level, result->cur_val, max->cur); - result->pri_val = min_t(uint32_t, result->pri_val, max->pri); - result->spr_val = min_t(uint32_t, result->spr_val, max->spr); - result->cur_val = min_t(uint32_t, result->cur_val, max->cur); + result->pri_val = min_t(u32, result->pri_val, max->pri); + result->spr_val = min_t(u32, result->spr_val, max->spr); + result->cur_val = min_t(u32, result->cur_val, max->cur); result->enable = true; } @@ -2749,9 +2747,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_plane_state *curstate, struct intel_wm_level *result) { - uint16_t pri_latency = dev_priv->wm.pri_latency[level]; - uint16_t spr_latency = dev_priv->wm.spr_latency[level]; - uint16_t cur_latency = dev_priv->wm.cur_latency[level]; + u16 pri_latency = dev_priv->wm.pri_latency[level]; + u16 spr_latency = dev_priv->wm.spr_latency[level]; + u16 cur_latency = dev_priv->wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units */ if (level > 0) { @@ -2775,7 +2773,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, result->enable = true; } -static uint32_t +static u32 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) { const struct intel_atomic_state *intel_state = @@ -2804,10 +2802,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) } static void intel_read_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[8]) + u16 wm[8]) { if (INTEL_GEN(dev_priv) >= 9) { - uint32_t val; + u32 val; int ret, i; int level, max_level = ilk_wm_max_level(dev_priv); @@ -2891,7 +2889,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[0] += 1; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - uint64_t sskpd = I915_READ64(MCH_SSKPD); + u64 sskpd = I915_READ64(MCH_SSKPD); wm[0] = (sskpd >> 56) & 0xFF; if (wm[0] == 0) @@ -2901,14 +2899,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[3] = (sskpd >> 20) & 0x1FF; wm[4] = (sskpd >> 32) & 0x1FF; } else if (INTEL_GEN(dev_priv) >= 6) { - uint32_t sskpd = I915_READ(MCH_SSKPD); + u32 sskpd = I915_READ(MCH_SSKPD); wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; } else if (INTEL_GEN(dev_priv) >= 5) { - uint32_t mltr = I915_READ(MLTR_ILK); + u32 mltr = I915_READ(MLTR_ILK); /* ILK primary LP0 latency is 700 ns */ wm[0] = 7; @@ -2920,7 +2918,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, } static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5]) + u16 wm[5]) { /* ILK sprite 
LP0 latency is 1300 ns */ if (IS_GEN(dev_priv, 5)) @@ -2928,7 +2926,7 @@ static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, } static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5]) + u16 wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ if (IS_GEN(dev_priv, 5)) @@ -2950,7 +2948,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv) static void intel_print_wm_latency(struct drm_i915_private *dev_priv, const char *name, - const uint16_t wm[8]) + const u16 wm[8]) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -2979,7 +2977,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, } static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, - uint16_t wm[5], uint16_t min) + u16 wm[5], u16 min) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -2988,7 +2986,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, wm[0] = max(wm[0], min); for (level = 1; level <= max_level; level++) - wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); return true; } @@ -3535,7 +3533,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, { struct ilk_wm_values *previous = &dev_priv->wm.hw; unsigned int dirty; - uint32_t val; + u32 val; dirty = ilk_compute_wm_dirty(dev_priv, previous, results); if (!dirty) @@ -4033,7 +4031,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { struct intel_plane *plane = to_intel_plane(pstate->base.plane); - uint32_t src_w, src_h, dst_w, dst_h; + u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4079,8 +4077,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) return pipe_downscale; if (crtc_state->pch_pfit.enabled) { - uint32_t src_w, src_h, dst_w, dst_h; - uint32_t pfit_size = crtc_state->pch_pfit.size; + u32 src_w, src_h, dst_w, dst_h; + u32 pfit_size = crtc_state->pch_pfit.size; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4113,7 +4111,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, const struct drm_plane_state *pstate; struct intel_plane_state *intel_pstate; int crtc_clock, dotclk; - uint32_t pipe_max_pixel_rate; + u32 pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); @@ -4169,8 +4167,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, { struct intel_plane *intel_plane = to_intel_plane(intel_pstate->base.plane); - uint32_t data_rate; - uint32_t width = 0, height = 0; + u32 data_rate; + u32 width = 0, height = 0; struct drm_framebuffer *fb; u32 format; uint_fixed_16_16_t down_scale_amount; @@ -4313,15 +4311,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; struct skl_plane_wm *wm; - uint16_t alloc_size, start = 0; - uint16_t total[I915_MAX_PLANES] = {}; - uint16_t uv_total[I915_MAX_PLANES] = {}; + u16 alloc_size, start = 0; + u16 total[I915_MAX_PLANES] = {}; + u16 uv_total[I915_MAX_PLANES] = {}; u64 total_data_rate; enum plane_id plane_id; int num_active; u64 plane_data_rate[I915_MAX_PLANES] = {}; u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; - uint16_t blocks = 0; + u16 blocks = 0; int level; /* Clear the partitioning for disabled planes. 
*/ @@ -4493,10 +4491,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. */ static uint_fixed_16_16_t -skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate, - uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size) +skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate, + u8 cpp, u32 latency, u32 dbuf_block_size) { - uint32_t wm_intermediate_val; + u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) @@ -4511,12 +4509,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate, return ret; } -static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, - uint32_t pipe_htotal, - uint32_t latency, - uint_fixed_16_16_t plane_blocks_per_line) +static uint_fixed_16_16_t +skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, + uint_fixed_16_16_t plane_blocks_per_line) { - uint32_t wm_intermediate_val; + u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) @@ -4532,8 +4529,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, static uint_fixed_16_16_t intel_get_linetime_us(const struct intel_crtc_state *cstate) { - uint32_t pixel_rate; - uint32_t crtc_htotal; + u32 pixel_rate; + u32 crtc_htotal; uint_fixed_16_16_t linetime_us; if (!cstate->base.active) @@ -4550,11 +4547,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate) return linetime_us; } -static uint32_t +static u32 skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { - uint64_t adjusted_pixel_rate; + u64 adjusted_pixel_rate; uint_fixed_16_16_t downscale_amount; /* Shouldn't reach here on disabled planes... */ @@ -4581,7 +4578,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_plane_state *pstate = &intel_pstate->base; const struct drm_framebuffer *fb = pstate->fb; - uint32_t interm_pbpl; + u32 interm_pbpl; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); @@ -4686,10 +4683,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, { struct drm_i915_private *dev_priv = to_i915(intel_pstate->base.plane->dev); - uint32_t latency = dev_priv->wm.skl_latency[level]; + u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; - uint32_t res_blocks, res_lines; + u32 res_blocks, res_lines; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); @@ -4795,13 +4792,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate, } } -static uint32_t +static u32 skl_compute_linetime_wm(const struct intel_crtc_state *cstate) { struct drm_atomic_state *state = cstate->base.state; struct drm_i915_private *dev_priv = to_i915(state->dev); uint_fixed_16_16_t linetime_us; - uint32_t linetime_wm; + u32 linetime_wm; linetime_us = intel_get_linetime_us(cstate); @@ -4824,9 +4821,9 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, { struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); - uint16_t trans_min, trans_y_tile_min; - const uint16_t trans_amount = 10; /* This is configurable amount */ - uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; + u16 trans_min, trans_y_tile_min; + const 
u16 trans_amount = 10; /* This is configurable amount */ + u16 wm0_sel_res_b, trans_offset_b, res_blocks; /* Transition WM are not recommended by HW team for GEN9 */ if (INTEL_GEN(dev_priv) <= 9) @@ -4855,8 +4852,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, wm0_sel_res_b = wm->wm[0].plane_res_b - 1; if (wp->y_tiled) { - trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, - wp->y_tile_minimum); + trans_y_tile_min = + (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + trans_offset_b; } else { @@ -5030,7 +5027,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, i915_reg_t reg, const struct skl_wm_level *level) { - uint32_t val = 0; + u32 val = 0; if (level->plane_en) { val |= PLANE_WM_EN; @@ -5161,12 +5158,12 @@ static int skl_update_pipe_wm(struct intel_crtc_state *cstate, return 0; } -static uint32_t +static u32 pipes_modified(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state *cstate; - uint32_t i, ret = 0; + u32 i, ret = 0; for_each_new_intel_crtc_in_state(state, crtc, cstate, i) ret |= drm_crtc_mask(&crtc->base); @@ -5267,7 +5264,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) const struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; - uint32_t realloc_pipes = pipes_modified(state); + u32 realloc_pipes = pipes_modified(state); int ret, i; /* @@ -5566,7 +5563,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, mutex_unlock(&dev_priv->wm.wm_mutex); } -static inline void skl_wm_level_from_reg_val(uint32_t val, +static inline void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->plane_en = val & PLANE_WM_EN; @@ -5582,7 +5579,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; int level, max_level; enum plane_id plane_id; - uint32_t val; + u32 val; max_level = ilk_wm_max_level(dev_priv); @@ -5694,7 +5691,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) static void g4x_read_wm_values(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); @@ -5721,7 +5718,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { enum pipe pipe; - uint32_t tmp; + u32 tmp; for_each_pipe(dev_priv, pipe) { tmp = I915_READ(VLV_DDL(pipe)); @@ -8505,7 +8502,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; /* * Required for FBC @@ -8577,7 +8574,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) { int pipe; - uint32_t val; + u32 val; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -8612,7 +8609,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(MCH_SSKPD); if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) @@ -8622,7 +8619,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate = 
ILK_VRHUNIT_CLOCK_GATE_DISABLE; + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); @@ -8716,7 +8713,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) { - uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + u32 reg = I915_READ(GEN7_FF_THREAD_MODE); /* * WaVSThreadDispatchOverride:ivb,vlv @@ -8752,7 +8749,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) static void lpt_suspend_hw(struct drm_i915_private *dev_priv) { if (HAS_PCH_LPT_LP(dev_priv)) { - uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); + u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); @@ -8990,7 +8987,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t snpcr; + u32 snpcr; I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); @@ -9199,7 +9196,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv) static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) { - uint32_t dspclk_gate; + u32 dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | @@ -9449,7 +9446,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) { - uint32_t flags = + u32 flags = I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; switch (flags) { @@ -9472,7 +9469,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) { - uint32_t flags = + u32 flags = I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; switch (flags) { -- cgit v1.2.3 From 3d0c5005d3f0c511307ec808919c85725c8d7644 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:21 +0200 Subject: drm/i915/ddi: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 52 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ce44744a5f9d..b0bb8dfc2ed5 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -974,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); } -static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) +static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { switch (pll->info->id) { case DPLL_ID_WRPLL1: @@ -995,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) } } -static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static u32 icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { const struct intel_shared_dpll *pll = crtc_state->shared_dpll; int clock = crtc_state->port_clock; @@ -1243,8 +1243,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, enum intel_dpll_id pll_id) { i915_reg_t cfgcr1_reg, cfgcr2_reg; - uint32_t cfgcr1_val, cfgcr2_val; - uint32_t p0, p1, p2, dco_freq; + u32 cfgcr1_val, cfgcr2_val; + u32 p0, p1, p2, dco_freq; cfgcr1_reg = DPLL_CFGCR1(pll_id); cfgcr2_reg = DPLL_CFGCR2(pll_id); @@ -1305,8 +1305,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, enum intel_dpll_id pll_id) { - uint32_t cfgcr0, cfgcr1; - uint32_t p0, p1, p2, dco_freq, ref_clock; + u32 cfgcr0, cfgcr1; + u32 p0, p1, p2, dco_freq, ref_clock; if (INTEL_GEN(dev_priv) >= 11) { cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); @@ -1471,7 +1471,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int link_clock = 0; - uint32_t pll_id; + u32 pll_id; pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); if (intel_port_is_combophy(dev_priv, port)) { @@ -1496,7 +1496,7 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; - uint32_t cfgcr0; + u32 cfgcr0; enum intel_dpll_id pll_id; pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); @@ -1550,7 +1550,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int link_clock = 0; - uint32_t dpll_ctl1; + u32 dpll_ctl1; enum intel_dpll_id pll_id; pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); @@ -1739,7 +1739,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - uint32_t temp; + u32 temp; temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (state == true) @@ -1757,7 +1757,7 @@ void 
intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; - uint32_t temp; + u32 temp; /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; @@ -1841,7 +1841,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - uint32_t val = I915_READ(reg); + u32 val = I915_READ(reg); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; @@ -1863,7 +1863,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, intel_wakeref_t wakeref; enum pipe pipe = 0; int ret = 0; - uint32_t tmp; + u32 tmp; wakeref = intel_display_power_get_if_enabled(dev_priv, intel_encoder->power_domain); @@ -1896,7 +1896,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) enum transcoder cpu_transcoder; intel_wakeref_t wakeref; enum pipe pipe = 0; - uint32_t tmp; + u32 tmp; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, @@ -2132,7 +2132,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, - enum port port, uint8_t iboost) + enum port port, u8 iboost) { u32 tmp; @@ -2151,7 +2151,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - uint8_t iboost; + u8 iboost; if (type == INTEL_OUTPUT_HDMI) iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; @@ -2665,7 +2665,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } -static uint32_t translate_signal_level(int signal_levels) +static u32 translate_signal_level(int signal_levels) { int i; @@ -2680,9 +2680,9 @@ static uint32_t translate_signal_level(int signal_levels) return 0; } -static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp) +static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) { - uint8_t train_set = intel_dp->train_set[0]; + u8 train_set = intel_dp->train_set[0]; int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -2707,7 +2707,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) return 0; } -uint32_t ddi_signal_levels(struct intel_dp *intel_dp) +u32 ddi_signal_levels(struct intel_dp *intel_dp) { struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); @@ -2721,8 +2721,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) } static inline -uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum port port) +u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, + enum port port) { if (intel_port_is_combophy(dev_priv, port)) { return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port); @@ -2857,7 +2857,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - uint32_t val; + u32 val; const struct intel_shared_dpll *pll = crtc_state->shared_dpll; if 
(WARN_ON(!pll)) @@ -3356,7 +3356,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - uint32_t val; + u32 val; /* * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) @@ -3644,7 +3644,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - uint32_t val; + u32 val; bool wait = false; if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { -- cgit v1.2.3 From 5a01892ae58f621b10dd85ec41321bdd18077418 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:22 +0200 Subject: drm/i915/csr: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch/whitepace fixes sprinkled on top of the changed lines. v2: more whitespace fixes (Ville, José) Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-5-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 68 ++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index ea5fb64d33dd..e8ac04c33e29 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH); struct intel_css_header { /* 0x09 for DMC */ - uint32_t module_type; + u32 module_type; /* Includes the DMC specific header in dwords */ - uint32_t header_len; + u32 header_len; /* always value would be 0x10000 */ - uint32_t header_ver; + u32 header_ver; /* Not used */ - uint32_t module_id; + u32 module_id; /* Not used */ - uint32_t module_vendor; + u32 module_vendor; /* in YYYYMMDD format */ - uint32_t date; + u32 date; /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */ - uint32_t size; + u32 size; /* Not used */ - uint32_t key_size; + u32 key_size; /* Not used */ - uint32_t modulus_size; + u32 modulus_size; /* Not used */ - uint32_t exponent_size; + u32 exponent_size; /* Not used */ - uint32_t reserved1[12]; + u32 reserved1[12]; /* Major Minor */ - uint32_t version; + u32 version; /* Not used */ - uint32_t reserved2[8]; + u32 reserved2[8]; /* Not used */ - uint32_t kernel_header_info; + u32 kernel_header_info; } __packed; struct intel_fw_info { - uint16_t reserved1; + u16 reserved1; /* Stepping (A, B, C, ..., *). * is a wildcard */ char stepping; @@ -121,8 +121,8 @@ struct intel_fw_info { /* Sub-stepping (0, 1, ..., *). 
* is a wildcard */ char substepping; - uint32_t offset; - uint32_t reserved2; + u32 offset; + u32 reserved2; } __packed; struct intel_package_header { @@ -135,14 +135,14 @@ struct intel_package_header { unsigned char reserved[10]; /* Number of valid entries in the FWInfo array below */ - uint32_t num_entries; + u32 num_entries; struct intel_fw_info fw_info[20]; } __packed; struct intel_dmc_header { /* always value would be 0x40403E3E */ - uint32_t signature; + u32 signature; /* DMC binary header length */ unsigned char header_len; @@ -151,30 +151,30 @@ struct intel_dmc_header { unsigned char header_ver; /* Reserved */ - uint16_t dmcc_ver; + u16 dmcc_ver; /* Major, Minor */ - uint32_t project; + u32 project; /* Firmware program size (excluding header) in dwords */ - uint32_t fw_size; + u32 fw_size; /* Major Minor version */ - uint32_t fw_version; + u32 fw_version; /* Number of valid MMIO cycles present. */ - uint32_t mmio_count; + u32 mmio_count; /* MMIO address */ - uint32_t mmioaddr[8]; + u32 mmioaddr[8]; /* MMIO data */ - uint32_t mmiodata[8]; + u32 mmiodata[8]; /* FW filename */ unsigned char dfile[32]; - uint32_t reserved1[2]; + u32 reserved1[2]; } __packed; struct stepping_info { @@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv) static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) { - uint32_t val, mask; + u32 val, mask; mask = DC_STATE_DEBUG_MASK_MEMORY_UP; @@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) void intel_csr_load_program(struct drm_i915_private *dev_priv) { u32 *payload = dev_priv->csr.dmc_payload; - uint32_t i, fw_size; + u32 i, fw_size; if (!HAS_CSR(dev_priv)) { DRM_ERROR("No CSR support available for this platform\n"); @@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) gen9_set_dc_state_debugmask(dev_priv); } -static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, - const struct firmware *fw) +static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, + const struct firmware *fw) { struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header *dmc_header; struct intel_csr *csr = &dev_priv->csr; const struct stepping_info *si = intel_get_stepping_info(dev_priv); - uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; - uint32_t i; - uint32_t *dmc_payload; + u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; + u32 i; + u32 *dmc_payload; if (!fw) return NULL; -- cgit v1.2.3 From ba3f4d0ad346a0ff83133b84a178d2d5fb0a2b37 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:23 +0200 Subject: drm/i915/display: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-6-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 104 +++++++++++++++++------------------ 1 file changed, 52 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9a6fbce1cafc..e0169979c51e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -52,7 +52,7 @@ #include "i915_trace.h" /* Primary plane formats for gen <= 3 */ -static const uint32_t i8xx_primary_formats[] = { +static const u32 i8xx_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB1555, @@ -60,7 +60,7 @@ static const uint32_t i8xx_primary_formats[] = { }; /* Primary plane formats for gen >= 4 */ -static const uint32_t i965_primary_formats[] = { +static const u32 i965_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -69,18 +69,18 @@ static const uint32_t i965_primary_formats[] = { DRM_FORMAT_XBGR2101010, }; -static const uint64_t i9xx_format_modifiers[] = { +static const u64 i9xx_format_modifiers[] = { I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; /* Cursor formats */ -static const uint32_t intel_cursor_formats[] = { +static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static const uint64_t cursor_format_modifiers[] = { +static const u64 cursor_format_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; @@ -496,7 +496,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } -static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) +static u32 i9xx_dpll_compute_m(struct dpll *dpll) { return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } @@ -531,8 +531,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) clock->p = clock->p1 * clock->p2; if (WARN_ON(clock->n == 0 || clock->p == 0)) return 0; - clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, - clock->n << 22); + clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m, + clock->n << 22); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot / 5; @@ -894,7 +894,7 @@ chv_find_best_dpll(const struct intel_limit *limit, struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; struct dpll clock; - uint64_t m2; + u64 m2; int found = false; memset(best_clock, 0, sizeof(*best_clock)); @@ -916,7 +916,7 @@ chv_find_best_dpll(const struct intel_limit *limit, clock.p = clock.p1 * clock.p2; - m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * + m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p * clock.n) << 22, refclk * clock.m1); if (m2 > INT_MAX/clock.m1) @@ -1613,7 +1613,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; - uint32_t val, pipeconf_val; + u32 val, pipeconf_val; /* Make sure PCH DPLL is enabled */ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); @@ -1701,7 +1701,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t reg; - uint32_t val; + u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -2378,7 +2378,7 @@ static int 
intel_fb_offset_to_xy(int *x, int *y, return 0; } -static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) +static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) { switch (fb_modifier) { case I915_FORMAT_MOD_X_TILED: @@ -3510,7 +3510,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state, return stride / skl_plane_stride_mult(fb, color_plane, rotation); } -static u32 skl_plane_ctl_format(uint32_t pixel_format) +static u32 skl_plane_ctl_format(u32 pixel_format) { switch (pixel_format) { case DRM_FORMAT_C8: @@ -3580,7 +3580,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state } } -static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) +static u32 skl_plane_ctl_tiling(u64 fb_modifier) { switch (fb_modifier) { case DRM_FORMAT_MOD_LINEAR: @@ -4600,7 +4600,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) { - uint32_t temp; + u32 temp; temp = I915_READ(SOUTH_CHICKEN1); if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) @@ -5723,7 +5723,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = MBUS_DBOX_A_CREDIT(2); val |= MBUS_DBOX_BW_CREDIT(1); @@ -6627,9 +6627,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); } -static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) +static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) { - uint32_t pixel_rate; + u32 pixel_rate; pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; @@ -6639,8 +6639,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) */ if (pipe_config->pch_pfit.enabled) { - uint64_t pipe_w, pipe_h, pfit_w, pfit_h; - uint32_t pfit_size = pipe_config->pch_pfit.size; + u64 pipe_w, pipe_h, pfit_w, pfit_h; + u32 pfit_size = pipe_config->pch_pfit.size; pipe_w = pipe_config->pipe_src_w; pipe_h = pipe_config->pipe_src_h; @@ -6655,7 +6655,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) if (WARN_ON(!pfit_w || !pfit_h)) return pixel_rate; - pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, + pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h, pfit_w * pfit_h); } @@ -6751,7 +6751,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } static void -intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) +intel_reduce_m_n_ratio(u32 *num, u32 *den) { while (*num > DATA_LINK_M_N_MASK || *den > DATA_LINK_M_N_MASK) { @@ -6761,7 +6761,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) } static void compute_m_n(unsigned int m, unsigned int n, - uint32_t *ret_m, uint32_t *ret_n, + u32 *ret_m, u32 *ret_n, bool constant_n) { /* @@ -6776,7 +6776,7 @@ static void compute_m_n(unsigned int m, unsigned int n, else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); - *ret_m = div_u64((uint64_t) m * *ret_n, n); + *ret_m = div_u64((u64)m * *ret_n, n); intel_reduce_m_n_ratio(ret_m, ret_n); } @@ -6806,12 +6806,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) +static u32 pnv_dpll_compute_fp(struct dpll *dpll) { return (1 << dpll->n) << 16 | dpll->m2; } -static uint32_t 
i9xx_dpll_compute_fp(struct dpll *dpll) +static u32 i9xx_dpll_compute_fp(struct dpll *dpll) { return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; } @@ -7367,7 +7367,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; - uint32_t crtc_vtotal, crtc_vblank_end; + u32 crtc_vtotal, crtc_vblank_end; int vsyncshift = 0; /* We need to be careful not to changed the adjusted mode, for otherwise @@ -7442,7 +7442,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - uint32_t tmp; + u32 tmp; tmp = I915_READ(HTOTAL(cpu_transcoder)); pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; @@ -7513,7 +7513,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t pipeconf; + u32 pipeconf; pipeconf = 0; @@ -7758,7 +7758,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t tmp; + u32 tmp; if (INTEL_GEN(dev_priv) <= 3 && (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) @@ -7974,7 +7974,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; - uint32_t tmp; + u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); @@ -8254,7 +8254,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = I915_READ(SOUTH_CHICKEN2); tmp |= FDI_MPHY_IOSFSB_RESET_CTL; @@ -8276,7 +8276,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) /* WaMPhyProgramming:hsw */ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) { - uint32_t tmp; + u32 tmp; tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); @@ -8357,7 +8357,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, bool with_spread, bool with_fdi) { - uint32_t reg, tmp; + u32 reg, tmp; if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) with_spread = true; @@ -8396,7 +8396,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, /* Sequence to disable CLKOUT_DP */ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) { - uint32_t reg, tmp; + u32 reg, tmp; mutex_lock(&dev_priv->sb_lock); @@ -8421,7 +8421,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) #define BEND_IDX(steps) ((50 + (steps)) / 5) -static const uint16_t sscdivintphase[] = { +static const u16 sscdivintphase[] = { [BEND_IDX( 50)] = 0x3B23, [BEND_IDX( 45)] = 0x3B23, [BEND_IDX( 40)] = 0x3C23, @@ -8453,7 +8453,7 @@ static const uint16_t sscdivintphase[] = { */ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) { - uint32_t tmp; + u32 tmp; int idx = BEND_IDX(steps); if (WARN_ON(steps % 5 != 0)) @@ -8519,7 +8519,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state 
*crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - uint32_t val; + u32 val; val = 0; @@ -8866,7 +8866,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; - uint32_t ps_ctrl = 0; + u32 ps_ctrl = 0; int id = -1; int i; @@ -9022,7 +9022,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t tmp; + u32 tmp; tmp = I915_READ(PF_CTL(crtc->pipe)); @@ -9048,7 +9048,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; - uint32_t tmp; + u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); @@ -9176,7 +9176,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); } -static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) +static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) { if (IS_HASWELL(dev_priv)) return I915_READ(D_COMP_HSW); @@ -9184,7 +9184,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) return I915_READ(D_COMP_BDW); } -static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) +static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); @@ -9209,7 +9209,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, bool switch_to_fclk, bool allow_power_down) { - uint32_t val; + u32 val; assert_can_disable_lcpll(dev_priv); @@ -9256,7 +9256,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, */ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; val = I915_READ(LCPLL_CTL); @@ -9331,7 +9331,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; DRM_DEBUG_KMS("Enabling package C8+\n"); @@ -9347,7 +9347,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - uint32_t val; + u32 val; DRM_DEBUG_KMS("Disabling package C8+\n"); @@ -9469,7 +9469,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; - uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); switch (ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: @@ -9639,7 +9639,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum port port; - uint32_t tmp; + u32 tmp; tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); @@ -13752,8 +13752,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h, struct drm_modeset_acquire_ctx *ctx) { 
struct drm_i915_private *dev_priv = to_i915(crtc->dev); -- cgit v1.2.3 From 143c335ad27f61c6d8d4401203ab8cf9221382ac Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:24 +0200 Subject: drm/i915/i915_drv.h: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Reviewed-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-7-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 158 ++++++++++++++++++++-------------------- 1 file changed, 79 insertions(+), 79 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 310d9e1e1620..03db011caa8e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -334,16 +334,16 @@ struct drm_i915_display_funcs { struct intel_csr { struct work_struct work; const char *fw_path; - uint32_t required_version; - uint32_t max_fw_size; /* bytes */ - uint32_t *dmc_payload; - uint32_t dmc_fw_size; /* dwords */ - uint32_t version; - uint32_t mmio_count; + u32 required_version; + u32 max_fw_size; /* bytes */ + u32 *dmc_payload; + u32 dmc_fw_size; /* dwords */ + u32 version; + u32 mmio_count; i915_reg_t mmioaddr[8]; - uint32_t mmiodata[8]; - uint32_t dc_state; - uint32_t allowed_dc_mask; + u32 mmiodata[8]; + u32 dc_state; + u32 allowed_dc_mask; intel_wakeref_t wakeref; }; @@ -400,7 +400,7 @@ struct intel_fbc { struct { unsigned int mode_flags; - uint32_t hsw_bdw_pixel_rate; + u32 hsw_bdw_pixel_rate; } crtc; struct { @@ -419,7 +419,7 @@ struct intel_fbc { int y; - uint16_t pixel_blend_mode; + u16 pixel_blend_mode; } plane; struct { @@ -559,7 +559,7 @@ struct i915_suspend_saved_registers { u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF3[3]; - uint64_t saveFENCE[I915_MAX_NUM_FENCES]; + u64 saveFENCE[I915_MAX_NUM_FENCES]; u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -906,9 +906,9 @@ struct i915_gem_mm { atomic_t bsd_engine_dispatch_index; /** Bit 6 swizzling required for X tiling */ - uint32_t bit_6_swizzle_x; + u32 bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ - uint32_t bit_6_swizzle_y; + u32 bit_6_swizzle_y; /* accounting, useful for userland debugging */ spinlock_t object_stat_lock; @@ -935,20 +935,20 @@ struct ddi_vbt_port_info { * populate this field. 
*/ #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff - uint8_t hdmi_level_shift; + u8 hdmi_level_shift; - uint8_t supports_dvi:1; - uint8_t supports_hdmi:1; - uint8_t supports_dp:1; - uint8_t supports_edp:1; - uint8_t supports_typec_usb:1; - uint8_t supports_tbt:1; + u8 supports_dvi:1; + u8 supports_hdmi:1; + u8 supports_dp:1; + u8 supports_edp:1; + u8 supports_typec_usb:1; + u8 supports_tbt:1; - uint8_t alternate_aux_channel; - uint8_t alternate_ddc_pin; + u8 alternate_aux_channel; + u8 alternate_ddc_pin; - uint8_t dp_boost_level; - uint8_t hdmi_boost_level; + u8 dp_boost_level; + u8 hdmi_boost_level; int dp_max_link_rate; /* 0 for not limited by VBT */ }; @@ -1039,41 +1039,41 @@ enum intel_ddb_partitioning { struct intel_wm_level { bool enable; - uint32_t pri_val; - uint32_t spr_val; - uint32_t cur_val; - uint32_t fbc_val; + u32 pri_val; + u32 spr_val; + u32 cur_val; + u32 fbc_val; }; struct ilk_wm_values { - uint32_t wm_pipe[3]; - uint32_t wm_lp[3]; - uint32_t wm_lp_spr[3]; - uint32_t wm_linetime[3]; + u32 wm_pipe[3]; + u32 wm_lp[3]; + u32 wm_lp_spr[3]; + u32 wm_linetime[3]; bool enable_fbc_wm; enum intel_ddb_partitioning partitioning; }; struct g4x_pipe_wm { - uint16_t plane[I915_MAX_PLANES]; - uint16_t fbc; + u16 plane[I915_MAX_PLANES]; + u16 fbc; }; struct g4x_sr_wm { - uint16_t plane; - uint16_t cursor; - uint16_t fbc; + u16 plane; + u16 cursor; + u16 fbc; }; struct vlv_wm_ddl_values { - uint8_t plane[I915_MAX_PLANES]; + u8 plane[I915_MAX_PLANES]; }; struct vlv_wm_values { struct g4x_pipe_wm pipe[3]; struct g4x_sr_wm sr; struct vlv_wm_ddl_values ddl[3]; - uint8_t level; + u8 level; bool cxsr; }; @@ -1087,10 +1087,10 @@ struct g4x_wm_values { }; struct skl_ddb_entry { - uint16_t start, end; /* in number of blocks, 'end' is exclusive */ + u16 start, end; /* in number of blocks, 'end' is exclusive */ }; -static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) +static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) { return entry->end - entry->start; } @@ -1114,8 +1114,8 @@ struct skl_ddb_values { }; struct skl_wm_level { - uint16_t plane_res_b; - uint8_t plane_res_l; + u16 plane_res_b; + u8 plane_res_l; bool plane_en; }; @@ -1124,15 +1124,15 @@ struct skl_wm_params { bool x_tiled, y_tiled; bool rc_surface; bool is_planar; - uint32_t width; - uint8_t cpp; - uint32_t plane_pixel_rate; - uint32_t y_min_scanlines; - uint32_t plane_bytes_per_line; + u32 width; + u8 cpp; + u32 plane_pixel_rate; + u32 y_min_scanlines; + u32 plane_bytes_per_line; uint_fixed_16_16_t plane_blocks_per_line; uint_fixed_16_16_t y_tile_minimum; - uint32_t linetime_us; - uint32_t dbuf_block_size; + u32 linetime_us; + u32 dbuf_block_size; }; /* @@ -1515,14 +1515,14 @@ struct drm_i915_private { * Base address of where the gmbus and gpio blocks are located (either * on PCH or on SoC for platforms without PCH). */ - uint32_t gpio_mmio_base; + u32 gpio_mmio_base; /* MMIO base address for MIPI regs */ - uint32_t mipi_mmio_base; + u32 mipi_mmio_base; - uint32_t psr_mmio_base; + u32 psr_mmio_base; - uint32_t pps_mmio_base; + u32 pps_mmio_base; wait_queue_head_t gmbus_wait_queue; @@ -1777,17 +1777,17 @@ struct drm_i915_private { * in 0.5us units for WM1+. */ /* primary */ - uint16_t pri_latency[5]; + u16 pri_latency[5]; /* sprite */ - uint16_t spr_latency[5]; + u16 spr_latency[5]; /* cursor */ - uint16_t cur_latency[5]; + u16 cur_latency[5]; /* * Raw watermark memory latency values * for SKL for all 8 levels * in 1us units. 
*/ - uint16_t skl_latency[8]; + u16 skl_latency[8]; /* current hardware state */ union { @@ -1797,7 +1797,7 @@ struct drm_i915_private { struct g4x_wm_values g4x; }; - uint8_t max_level; + u8 max_level; /* * Should be held around atomic WM register writing; also @@ -2686,45 +2686,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t mask, - uint32_t bits); + u32 mask, + u32 bits); void ilk_update_display_irq(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void -ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits) { ilk_update_display_irq(dev_priv, bits, bits); } static inline void -ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) +ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits) { ilk_update_display_irq(dev_priv, bits, 0); } void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, enum pipe pipe, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + enum pipe pipe, u32 bits) { bdw_update_pipe_irq(dev_priv, pipe, bits, bits); } static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + enum pipe pipe, u32 bits) { bdw_update_pipe_irq(dev_priv, pipe, bits, 0); } void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - uint32_t interrupt_mask, - uint32_t enabled_irq_mask); + u32 interrupt_mask, + u32 enabled_irq_mask); static inline void -ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) +ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) { ibx_display_interrupt_update(dev_priv, bits, bits); } static inline void -ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) +ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) { ibx_display_interrupt_update(dev_priv, bits, 0); } @@ -2984,7 +2984,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle, uint64_t *offset); + u32 handle, u64 *offset); int i915_gem_mmap_gtt_version(void); void i915_gem_track_fb(struct drm_i915_gem_object *old, @@ -3125,7 +3125,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); void i915_oa_init_reg_state(struct intel_engine_cs *engine, struct i915_gem_context *ctx, - uint32_t *reg_state); + u32 *reg_state); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3377,10 +3377,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, enum dpio_phy phy); bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy); -uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count); +u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count); void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - uint8_t lane_lat_optim_mask); -uint8_t 
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); + u8 lane_lat_optim_mask); +u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); void chv_set_phy_signal_level(struct intel_encoder *encoder, u32 deemph_reg_value, u32 margin_reg_value, -- cgit v1.2.3 From d25236a3290ce621033a76e15bf74613acdb3a6d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:25 +0200 Subject: drm/i915/intel_drv.h: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Minor checkpatch fixes sprinkled on top of the changed lines. Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-8-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_drv.h | 94 ++++++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9ecff07598d9..2c1b3b1de9c2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -300,13 +300,12 @@ struct intel_panel { /* Connector and platform specific backlight functions */ int (*setup)(struct intel_connector *connector, enum pipe pipe); - uint32_t (*get)(struct intel_connector *connector); - void (*set)(const struct drm_connector_state *conn_state, uint32_t level); + u32 (*get)(struct intel_connector *connector); + void (*set)(const struct drm_connector_state *conn_state, u32 level); void (*disable)(const struct drm_connector_state *conn_state); void (*enable)(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); - uint32_t (*hz_to_pwm)(struct intel_connector *connector, - uint32_t hz); + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); void (*power)(struct intel_connector *, bool enable); } backlight; }; @@ -598,7 +597,7 @@ struct intel_initial_plane_config { struct intel_scaler { int in_use; - uint32_t mode; + u32 mode; }; struct intel_crtc_scaler_state { @@ -636,7 +635,7 @@ struct intel_crtc_scaler_state { struct intel_pipe_wm { struct intel_wm_level wm[5]; - uint32_t linetime; + u32 linetime; bool fbc_wm_enabled; bool pipe_enabled; bool sprites_enabled; @@ -652,7 +651,7 @@ struct skl_plane_wm { struct skl_pipe_wm { struct skl_plane_wm planes[I915_MAX_PLANES]; - uint32_t linetime; + u32 linetime; }; enum vlv_wm_level { @@ -665,7 +664,7 @@ enum vlv_wm_level { struct vlv_wm_state { struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS]; struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS]; - uint8_t num_levels; + u8 num_levels; bool cxsr; }; @@ -878,13 +877,13 @@ struct intel_crtc_state { /* Used by SDVO (and if we ever fix it, HDMI). */ unsigned pixel_multiplier; - uint8_t lane_count; + u8 lane_count; /* * Used by platforms having DP/HDMI PHY with programmable lane * latency optimization. 
*/ - uint8_t lane_lat_optim_mask; + u8 lane_lat_optim_mask; /* minimum acceptable voltage level */ u8 min_voltage_level; @@ -928,7 +927,7 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; /* Gamma mode programmed on the pipe */ - uint32_t gamma_mode; + u32 gamma_mode; /* bitmask of visible planes (enum plane_id) */ u8 active_planes; @@ -1014,7 +1013,7 @@ struct intel_plane { enum pipe pipe; bool has_fbc; bool has_ccs; - uint32_t frontbuffer_bit; + u32 frontbuffer_bit; struct { u32 base, cntl, size; @@ -1110,9 +1109,9 @@ enum link_m_n_set { struct intel_dp_compliance_data { unsigned long edid; - uint8_t video_pattern; - uint16_t hdisplay, vdisplay; - uint8_t bpc; + u8 video_pattern; + u16 hdisplay, vdisplay; + u8 bpc; }; struct intel_dp_compliance { @@ -1125,18 +1124,18 @@ struct intel_dp_compliance { struct intel_dp { i915_reg_t output_reg; - uint32_t DP; + u32 DP; int link_rate; - uint8_t lane_count; - uint8_t sink_count; + u8 lane_count; + u8 sink_count; bool link_mst; bool link_trained; bool has_audio; bool reset_link_params; - uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; - uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; - uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; u8 fec_capable; /* source rates */ @@ -1156,7 +1155,7 @@ struct intel_dp { /* sink or branch descriptor */ struct drm_dp_desc desc; struct drm_dp_aux aux; - uint8_t train_set[4]; + u8 train_set[4]; int panel_power_up_delay; int panel_power_down_delay; int panel_power_cycle_delay; @@ -1198,14 +1197,13 @@ struct intel_dp { struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; struct drm_dp_mst_topology_mgr mst_mgr; - uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); + u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index); /* * This function returns the value we have to program the AUX_CTL * register with to kick off an AUX transaction. */ - uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, - int send_bytes, - uint32_t aux_clock_divider); + u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes, + u32 aux_clock_divider); i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp); i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index); @@ -1239,7 +1237,7 @@ struct intel_digital_port { struct intel_lspcon lspcon; enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; - uint8_t max_lanes; + u8 max_lanes; /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. 
*/ enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; @@ -1474,8 +1472,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv); void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ -void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv); @@ -1538,7 +1536,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); u32 bxt_signal_levels(struct intel_dp *intel_dp); -uint32_t ddi_signal_levels(struct intel_dp *intel_dp); +u32 ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing); @@ -1678,11 +1676,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, int intel_plane_atomic_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, - uint64_t *val); + u64 *val); int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, - uint64_t val); + u64 val); int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct drm_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, @@ -1802,10 +1800,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count, + int link_rate, u8 lane_count, bool link_mst); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, - int link_rate, uint8_t lane_count); + int link_rate, u8 lane_count); void intel_dp_start_link_train(struct intel_dp *intel_dp); void intel_dp_stop_link_train(struct intel_dp *intel_dp); int intel_dp_retrain_link(struct intel_encoder *encoder, @@ -1837,7 +1835,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); -uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +u32 intel_dp_pack_aux(const u8 *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); @@ -1850,24 +1848,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, - uint8_t dp_train_pat); + u8 dp_train_pat); void intel_dp_set_signal_levels(struct intel_dp *intel_dp); void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); -uint8_t +u8 intel_dp_voltage_max(struct intel_dp *intel_dp); -uint8_t -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing); +u8 +intel_dp_pre_emphasis_max(struct 
intel_dp *intel_dp, u8 voltage_swing); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, - uint8_t *link_bw, uint8_t *rate_select); + u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool -intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); -uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, - int mode_clock, int mode_hdisplay); -uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, - int mode_hdisplay); +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]); +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, + int mode_clock, int mode_hdisplay); +u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, + int mode_hdisplay); /* intel_vdsc.c */ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, @@ -2325,11 +2323,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv); int intel_digital_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val); + u64 *val); int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val); + u64 val); int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_connector_state *new_state); struct drm_connector_state * -- cgit v1.2.3 From f1e9c90947979c041130011fbcd070200b5527b5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 19 Jan 2019 14:30:24 +0000 Subject: drm/i915: Prevent use of global_seqno=0 We are not allowed to assign rq->global_seqno=0 as it has a special meaning of "inactive" (not executing on HW). 
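Because 0 is reserved to mean "inactive", any generator of global seqnos has to skip it when the counter wraps. A minimal sketch of that idiom, written here as a hypothetical standalone counter rather than the driver's code (the actual helper, next_global_seqno(), is added in the hunk below):

    static u32 seqno_counter;

    static u32 seqno_next(void)
    {
            /* pre-increment; if we wrapped around to the reserved value 0, skip it */
            if (!++seqno_counter)
                    ++seqno_counter;
            return seqno_counter;
    }
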
Fixes: 6faf5916e6be ("drm/i915: Remove HW semaphores for gen7 inter-engine synchronisation") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190119143024.26971-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 33eb9df0dd0e..c7ce27785cda 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -343,6 +343,13 @@ static void move_to_timeline(struct i915_request *request, spin_unlock(&request->timeline->lock); } +static u32 next_global_seqno(struct i915_timeline *tl) +{ + if (!++tl->seqno) + ++tl->seqno; + return tl->seqno; +} + void __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; @@ -359,7 +366,7 @@ void __i915_request_submit(struct i915_request *request) GEM_BUG_ON(request->global_seqno); - seqno = timeline_get_seqno(&engine->timeline); + seqno = next_global_seqno(&engine->timeline); GEM_BUG_ON(!seqno); GEM_BUG_ON(intel_engine_signaled(engine, seqno)); -- cgit v1.2.3 From ca0b04db14a51893322a2a4638a41dc79c2cf98a Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 1 Dec 2018 12:31:45 +0100 Subject: drm/i915/dsi: Fix pipe_bpp for handling for 6 bpc pixel-formats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are 3 problems with the dsi code's pipe_bpp handling for 6 bpc pixel-formats which this commit addresses: 1) It assumes that the pipe_bpp is the same as the bpp going over the dsi lanes. This assumption is not valid for MIPI_DSI_FMT_RGB666, where pipe_bpp should be 18 so that we do proper dithering but we actually send 24 bpp over the dsi lanes (MIPI_DSI_FMT_RGB666_PACKED sends 18 bpp). This assumption is enforced by an assert in *_dsi_get_pclk(). This assert triggers on the initial hw-state readback on BYT/CHT devices which use MIPI_DSI_FMT_RGB666, such as the Prowise PT301 tablet. PIPECONF is set to 6BPC / 18 bpp by the GOP, while mipi_dsi_pixel_format_to_bpp() returns 24. This commits switches the calculations in *_dsi_get_pclk() to use the bpp from mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format) which returns the bpp going over the mipi lanes and drops the assert. 2) On BXT bxt_dsi_get_pipe_config() wrongly overrides the pipe_bpp which i9xx_get_pipe_config() reads from PIPECONF with the return value from mipi_dsi_pixel_format_to_bpp(). This avoids the assert from 1. but is wrong since the pipe is actually running at the value configured in PIPECONF. This commit drops the override of pipe_bpp from bxt_dsi_get_pipe_config(). 3) The dsi encoder's compute_config() never assigns a value to pipe_bpp, unlike most other encoders. Falling back on compute_baseline_pipe_bpp() which always picks 24. 24 is only correct for MIPI_DSI_FMT_RGB88 for the others we should use 18 bpp so that we correctly do 6bpc color dithering. This commit adds code to intel_dsi_compute_config() to properly set pipe_bpp based on intel_dsi->pixel_format. 
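The distinction drawn above is between the bpp programmed into the pipe (which decides whether 6 bpc dithering happens) and the bpp actually sent over the DSI lanes as returned by mipi_dsi_pixel_format_to_bpp(); the two differ for MIPI_DSI_FMT_RGB666. A rough sketch of the intended pipe_bpp selection, assuming only the three formats named above (the actual hunks follow below and use a simple if/else):

    switch (intel_dsi->pixel_format) {
    case MIPI_DSI_FMT_RGB888:
            pipe_config->pipe_bpp = 24;      /* 24 bpp in the pipe, 24 bpp on the lanes */
            break;
    case MIPI_DSI_FMT_RGB666:                /* 18 bpp in the pipe, but 24 bpp on the lanes */
    case MIPI_DSI_FMT_RGB666_PACKED:         /* 18 bpp in the pipe, 18 bpp on the lanes */
    default:
            pipe_config->pipe_bpp = 18;      /* 6 bpc, so the pipe dithers */
            break;
    }
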
Signed-off-by: Hans de Goede Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20181201113148.23184-1-hdegoede@redhat.com --- drivers/gpu/drm/i915/intel_dsi.h | 4 ++-- drivers/gpu/drm/i915/vlv_dsi.c | 17 +++++++++-------- drivers/gpu/drm/i915/vlv_dsi_pll.c | 31 ++++++------------------------- 3 files changed, 17 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index df3d390e25fe..a9a19778dc7f 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -173,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config); void vlv_dsi_pll_disable(struct intel_encoder *encoder); -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config); void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); @@ -183,7 +183,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config); void bxt_dsi_pll_disable(struct intel_encoder *encoder); -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config); void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index c247ce74b71a..54cbd8eb1718 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -289,6 +289,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; + if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) + pipe_config->pipe_bpp = 24; + else + pipe_config->pipe_bpp = 18; + if (IS_GEN9_LP(dev_priv)) { /* Enable Frame time stamp based scanline reporting */ adjusted_mode->private_flags |= @@ -1059,10 +1064,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, } fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; - pipe_config->pipe_bpp = - mipi_dsi_pixel_format_to_bpp( - pixel_format_from_register_bits(fmt)); - bpp = pipe_config->pipe_bpp; + bpp = mipi_dsi_pixel_format_to_bpp( + pixel_format_from_register_bits(fmt)); /* Enable Frame time stamo based scanline reporting */ adjusted_mode->private_flags |= @@ -1200,11 +1203,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, if (IS_GEN9_LP(dev_priv)) { bxt_dsi_get_pipe_config(encoder, pipe_config); - pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp, - pipe_config); + pclk = bxt_dsi_get_pclk(encoder, pipe_config); } else { - pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp, - pipe_config); + pclk = vlv_dsi_get_pclk(encoder, pipe_config); } if (pclk) { diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index a132a8037ecc..954d5a8c4fa7 100644 --- a/drivers/gpu/drm/i915/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c @@ -252,20 +252,12 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); } -static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp) -{ - int bpp = mipi_dsi_pixel_format_to_bpp(fmt); - - WARN(bpp != pipe_bpp, - "bpp match assertion failure (expected %d, 
current %d)\n", - bpp, pipe_bpp); -} - -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock, pclk; u32 pll_ctl, pll_div; u32 m = 0, p = 0, n; @@ -319,15 +311,12 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, dsi_clock = (m * refclk) / (p * n); - /* pixel_format and pipe_bpp should agree */ - assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); - - pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp); + pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); return pclk; } -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { u32 pclk; @@ -335,12 +324,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, u32 dsi_ratio; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - /* Divide by zero */ - if (!pipe_bpp) { - DRM_ERROR("Invalid BPP(0)\n"); - return 0; - } + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL); @@ -348,10 +332,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - /* pixel_format and pipe_bpp should agree */ - assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); - - pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp); + pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk); return pclk; -- cgit v1.2.3 From 24bf86ccf9f5563f7d288f8b7672b0facdefbc95 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 1 Dec 2018 12:31:46 +0100 Subject: drm/i915/dsi: Enable dithering for 6 bpc panels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The display engine has 2 dithering enable bits which both need to be set for dithering to happen, 1 in the PIPECONF register which is taken care of by i9xx_set_pipeconf() and a second bit at the encoder level. The dsi code was not setting the encoder level dithering enable bit causing dithering to be disabled, this commit fixes this. 
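The fix therefore has two halves: PIPECONF-level dithering is already handled by i9xx_set_pipeconf(), so only the encoder-level bit needs setting in the DSI port enable path. A sketch of that half, assuming the port control value is still being assembled in 'temp' (the actual hunk follows below):

    /* 6 bpc pixel formats need the encoder-level dithering bit as well */
    if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888)
            temp |= DITHERING_ENABLE;

    /* assert ip_tg_enable signal */
    I915_WRITE(port_ctrl, temp | DPI_ENABLE);
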
Signed-off-by: Hans de Goede Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20181201113148.23184-2-hdegoede@redhat.com --- drivers/gpu/drm/i915/vlv_dsi.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 54cbd8eb1718..4d47910e5184 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -678,6 +678,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, LANE_CONFIGURATION_DUAL_LINK_B : LANE_CONFIGURATION_DUAL_LINK_A; } + + if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888) + temp |= DITHERING_ENABLE; + /* assert ip_tg_enable signal */ I915_WRITE(port_ctrl, temp | DPI_ENABLE); POSTING_READ(port_ctrl); -- cgit v1.2.3 From 480cd6dd9287ac8276c802475bacb3138afd8e04 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:20:48 +0000 Subject: drm/i915/selftests: Track evict objects explicitly During review of commit 71fc448c1aaf ("drm/i915/selftests: Make evict tolerant of foreign objects"), Matthew mentioned it would be better if we explicitly tracked the objects we created. We have an obj->st_link hook for this purpose, so add the corresponding list of objects and reduce our loops to only consider our own list. References: 71fc448c1aaf ("drm/i915/selftests: Make evict tolerant of foreign objects") Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 114 ++++++++++++------------ 1 file changed, 55 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 543d618c152b..d0553bc69705 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -29,25 +29,21 @@ #include "mock_drm.h" #include "mock_gem_device.h" -static int populate_ggtt(struct drm_i915_private *i915) +static void quirk_add(struct drm_i915_gem_object *obj, + struct list_head *objects) +{ + /* quirk is only for live tiled objects, use it to declare ownership */ + GEM_BUG_ON(obj->mm.quirked); + obj->mm.quirked = true; + list_add(&obj->st_link, objects); +} + +static int populate_ggtt(struct drm_i915_private *i915, + struct list_head *objects) { - struct drm_i915_gem_object *obj, *on; - unsigned long expected_unbound, expected_bound; unsigned long unbound, bound, count; + struct drm_i915_gem_object *obj; u64 size; - int err; - - expected_unbound = 0; - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { - i915_gem_object_get(obj); - expected_unbound++; - } - - expected_bound = 0; - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { - i915_gem_object_get(obj); - expected_bound++; - } count = 0; for (size = 0; @@ -56,38 +52,36 @@ static int populate_ggtt(struct drm_i915_private *i915) struct i915_vma *vma; obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto cleanup; - } + if (IS_ERR(obj)) + return PTR_ERR(obj); + + quirk_add(obj, objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto cleanup; - } + if (IS_ERR(vma)) + return PTR_ERR(vma); count++; } unbound = 0; list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - unbound++; - if (unbound != expected_unbound) { - pr_err("%s: Found %lu objects 
unbound, expected %lu!\n", - __func__, unbound, expected_unbound); - err = -EINVAL; - goto cleanup; + if (obj->mm.quirked) + unbound++; + if (unbound) { + pr_err("%s: Found %lu objects unbound, expected %u!\n", + __func__, unbound, 0); + return -EINVAL; } bound = 0; list_for_each_entry(obj, &i915->mm.bound_list, mm.link) - bound++; - if (bound != expected_bound + count) { + if (obj->mm.quirked) + bound++; + if (bound != count) { pr_err("%s: Found %lu objects bound, expected %lu!\n", - __func__, bound, expected_bound + count); - err = -EINVAL; - goto cleanup; + __func__, bound, count); + return -EINVAL; } if (list_empty(&i915->ggtt.vm.inactive_list)) { @@ -96,15 +90,6 @@ static int populate_ggtt(struct drm_i915_private *i915) } return 0; - -cleanup: - list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link) - i915_gem_object_put(obj); - - list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link) - i915_gem_object_put(obj); - - return err; } static void unpin_ggtt(struct drm_i915_private *i915) @@ -112,18 +97,20 @@ static void unpin_ggtt(struct drm_i915_private *i915) struct i915_vma *vma; list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link) - i915_vma_unpin(vma); + if (vma->obj->mm.quirked) + i915_vma_unpin(vma); } -static void cleanup_objects(struct drm_i915_private *i915) +static void cleanup_objects(struct drm_i915_private *i915, + struct list_head *list) { struct drm_i915_gem_object *obj, *on; - list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link) - i915_gem_object_put(obj); - - list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link) + list_for_each_entry_safe(obj, on, list, st_link) { + GEM_BUG_ON(!obj->mm.quirked); + obj->mm.quirked = false; i915_gem_object_put(obj); + } mutex_unlock(&i915->drm.struct_mutex); @@ -136,11 +123,12 @@ static int igt_evict_something(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict one. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -169,7 +157,7 @@ static int igt_evict_something(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -178,13 +166,14 @@ static int igt_overcommit(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and then try to pin one more. * We expect it to fail. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -194,6 +183,8 @@ static int igt_overcommit(void *arg) goto cleanup; } + quirk_add(obj, &objects); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) { pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma)); @@ -202,7 +193,7 @@ static int igt_overcommit(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -214,11 +205,12 @@ static int igt_evict_for_vma(void *arg) .start = 0, .size = 4096, }; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict a range. 
*/ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -241,7 +233,7 @@ static int igt_evict_for_vma(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } @@ -264,6 +256,7 @@ static int igt_evict_for_cache_color(void *arg) }; struct drm_i915_gem_object *obj; struct i915_vma *vma; + LIST_HEAD(objects); int err; /* Currently the use of color_adjust is limited to cache domains within @@ -279,6 +272,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, I915_GTT_PAGE_SIZE | flags); @@ -294,6 +288,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + quirk_add(obj, &objects); /* Neighbouring; same colour - should fit */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -329,7 +324,7 @@ static int igt_evict_for_cache_color(void *arg) cleanup: unpin_ggtt(i915); - cleanup_objects(i915); + cleanup_objects(i915, &objects); ggtt->vm.mm.color_adjust = NULL; return err; } @@ -338,11 +333,12 @@ static int igt_evict_vm(void *arg) { struct drm_i915_private *i915 = arg; struct i915_ggtt *ggtt = &i915->ggtt; + LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict everything. */ - err = populate_ggtt(i915); + err = populate_ggtt(i915, &objects); if (err) goto cleanup; @@ -364,7 +360,7 @@ static int igt_evict_vm(void *arg) } cleanup: - cleanup_objects(i915); + cleanup_objects(i915, &objects); return err; } -- cgit v1.2.3 From c95e7ce387f97df6f7e61e08a35f97a8d74e5ee1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:20:49 +0000 Subject: drm/i915/selftests: Create a clean GGTT for vma/gtt selftesting Some tests (e.g. igt_vma_pin1) presume that we have a completely clean GGTT so that it can probe boundaries without fear that something is already allocated there. However, the mock device is starting to get complicated and following similar rules to the live device, i.e. we can't guarantee that i915->ggtt remains clean, so create a temporary address_space equivalent to the mock ggtt for the purpose. 
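A sketch of the resulting harness pattern, assuming the mock_init_ggtt()/mock_fini_ggtt() signatures introduced in the diff below; each mock selftest run now builds a private, guaranteed-empty GGTT instead of probing i915->ggtt:

	struct drm_i915_private *i915;
	struct i915_ggtt ggtt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mock_init_ggtt(i915, &ggtt);		/* throwaway address space for this run */

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, &ggtt);	/* subtests receive the ggtt, not the device */
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);
	mock_fini_ggtt(&ggtt);
	drm_dev_put(&i915->drm);

	return err;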
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-7-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 108 ++++++++++++++--------- drivers/gpu/drm/i915/selftests/i915_vma.c | 77 +++++++++------- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 4 +- drivers/gpu/drm/i915/selftests/mock_gtt.c | 9 +- drivers/gpu/drm/i915/selftests/mock_gtt.h | 4 +- 5 files changed, 114 insertions(+), 88 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index fea8ab14e79d..06bde4a273cb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1267,27 +1267,35 @@ static int exercise_mock(struct drm_i915_private *i915, static int igt_mock_fill(void *arg) { - return exercise_mock(arg, fill_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, fill_hole); } static int igt_mock_walk(void *arg) { - return exercise_mock(arg, walk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, walk_hole); } static int igt_mock_pot(void *arg) { - return exercise_mock(arg, pot_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, pot_hole); } static int igt_mock_drunk(void *arg) { - return exercise_mock(arg, drunk_hole); + struct i915_ggtt *ggtt = arg; + + return exercise_mock(ggtt->vm.i915, drunk_hole); } static int igt_gtt_reserve(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; LIST_HEAD(objects); u64 total; @@ -1300,11 +1308,12 @@ static int igt_gtt_reserve(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1318,20 +1327,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1349,11 +1358,12 @@ static int igt_gtt_reserve(void *arg) /* Now we start forcing evictions */ for (total = I915_GTT_PAGE_SIZE; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1367,20 +1377,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = 
i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1401,7 +1411,7 @@ static int igt_gtt_reserve(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1413,18 +1423,18 @@ static int igt_gtt_reserve(void *arg) goto out; } - offset = random_offset(0, i915->ggtt.vm.total, + offset = random_offset(0, ggtt->vm.total, 2*I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT); - err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, offset, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1450,7 +1460,7 @@ out: static int igt_gtt_insert(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; struct drm_mm_node tmp = {}; const struct invalid_insert { @@ -1459,8 +1469,8 @@ static int igt_gtt_insert(void *arg) u64 start, end; } invalid_insert[] = { { - i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0, - 0, i915->ggtt.vm.total, + ggtt->vm.total + I915_GTT_PAGE_SIZE, 0, + 0, ggtt->vm.total, }, { 2*I915_GTT_PAGE_SIZE, 0, @@ -1490,7 +1500,7 @@ static int igt_gtt_insert(void *arg) /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { - err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp, + err = i915_gem_gtt_insert(&ggtt->vm, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, @@ -1505,11 +1515,12 @@ static int igt_gtt_insert(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; + total + I915_GTT_PAGE_SIZE <= ggtt->vm.total; total += I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1523,15 +1534,15 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err == -ENOSPC) { /* maxed out the GGTT space */ @@ -1540,7 +1551,7 @@ static int igt_gtt_insert(void *arg) } if (err) { pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1552,7 +1563,7 @@ static int igt_gtt_insert(void *arg) list_for_each_entry(obj, &objects, st_link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1572,7 +1583,7 @@ static int igt_gtt_insert(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, 
&i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1587,13 +1598,13 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1609,11 +1620,12 @@ static int igt_gtt_insert(void *arg) /* And then force evictions */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; - total += 2*I915_GTT_PAGE_SIZE) { + total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total; + total += 2 * I915_GTT_PAGE_SIZE) { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; @@ -1627,19 +1639,19 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, + err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.vm.total, err); + total, ggtt->vm.total, err); goto out; } track_vma_bind(vma); @@ -1666,17 +1678,25 @@ int i915_gem_gtt_mock_selftests(void) SUBTEST(igt_gtt_insert), }; struct drm_i915_private *i915; + struct i915_ggtt ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + mock_init_ggtt(i915, &ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, &ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(&ggtt); drm_dev_put(&i915->drm); + return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index ffa74290e054..f0a32edfb9b1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -28,6 +28,7 @@ #include "mock_gem_device.h" #include "mock_context.h" +#include "mock_gtt.h" static bool assert_vma(struct i915_vma *vma, struct drm_i915_gem_object *obj, @@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915, static int igt_vma_create(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; + struct drm_i915_private *i915 = ggtt->vm.i915; struct drm_i915_gem_object *obj, *on; struct i915_gem_context *ctx, *cn; unsigned long num_obj, num_ctx; @@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma, static int igt_vma_pin1(void *arg) { - struct drm_i915_private *i915 = arg; + struct i915_ggtt *ggtt = arg; const struct pin_mode modes[] = { #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " } #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" } @@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg) VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 
4096), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), - - VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end), - VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), - INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), + + VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end), + VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), + INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)), VALID(4096, PIN_GLOBAL), VALID(8192, PIN_GLOBAL), - VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), - NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL), - VALID(i915->ggtt.vm.total, PIN_GLOBAL), - NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL), + VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE), + NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), + VALID(ggtt->vm.total - 4096, PIN_GLOBAL), + VALID(ggtt->vm.total, PIN_GLOBAL), + NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL), NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), - INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), - VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), + VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), #if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) /* Misusing BIAS is a programming error (it is not controllable @@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg) * However, the tests are still quite interesting for checking * variable start, end and size. 
*/ - NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end), - NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total), - NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), + NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end), + NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total), + NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)), + NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)), #endif { }, #undef NOSPACE @@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg) * focusing on error handling of boundary conditions. */ - GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm)); + GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm)); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); - vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = checked_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) goto out; @@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a, static int igt_vma_rotate(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; struct drm_i915_gem_object *obj; const struct intel_rotation_plane_info planes[] = { { .width = 1, .height = 1, .stride = 1 }, @@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg) * that the page layout within the rotated VMA match our expectations. */ - obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma, static int igt_vma_partial(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.vm; + struct i915_ggtt *ggtt = arg; + struct i915_address_space *vm = &ggtt->vm; const unsigned int npages = 1021; /* prime! */ struct drm_i915_gem_object *obj; const struct phase { @@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg) * we are returned the same VMA when we later request the same range. 
*/ - obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE); if (IS_ERR(obj)) goto out; @@ -723,17 +725,24 @@ int i915_vma_mock_selftests(void) SUBTEST(igt_vma_partial), }; struct drm_i915_private *i915; + struct i915_ggtt ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; + mock_init_ggtt(i915, &ggtt); + mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); + err = i915_subtests(tests, &ggtt); + mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + + mock_fini_ggtt(&ggtt); drm_dev_put(&i915->drm); + return err; } - diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 3cda66292e76..5477ad4a7e7d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -72,7 +72,7 @@ static void mock_device_release(struct drm_device *dev) i915_gem_drain_freed_objects(i915); mutex_lock(&i915->drm.struct_mutex); - mock_fini_ggtt(i915); + mock_fini_ggtt(&i915->ggtt); mutex_unlock(&i915->drm.struct_mutex); WARN_ON(!list_empty(&i915->gt.timelines)); @@ -232,7 +232,7 @@ struct drm_i915_private *mock_gem_device(void) mutex_lock(&i915->drm.struct_mutex); - mock_init_ggtt(i915); + mock_init_ggtt(i915, &i915->ggtt); mkwrite_device_info(i915)->ring_mask = BIT(0); i915->kernel_context = mock_context(i915, NULL); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 976c862b3842..cd83929fde8e 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -97,9 +97,9 @@ static void mock_unbind_ggtt(struct i915_vma *vma) { } -void mock_init_ggtt(struct drm_i915_private *i915) +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; + memset(ggtt, 0, sizeof(*ggtt)); ggtt->vm.i915 = i915; ggtt->vm.is_ggtt = true; @@ -118,13 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); } -void mock_fini_ggtt(struct drm_i915_private *i915) +void mock_fini_ggtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; - i915_address_space_fini(&ggtt->vm); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h index 9a0a833bb545..40d544bde1d5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.h +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h @@ -25,8 +25,8 @@ #ifndef __MOCK_GTT_H #define __MOCK_GTT_H -void mock_init_ggtt(struct drm_i915_private *i915); -void mock_fini_ggtt(struct drm_i915_private *i915); +void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt); +void mock_fini_ggtt(struct i915_ggtt *ggtt); struct i915_hw_ppgtt * mock_ppgtt(struct drm_i915_private *i915, -- cgit v1.2.3 From e4a8c8130ba3ac5566c96c0dd79d7a3988fc13ab Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:20:47 +0000 Subject: drm/i915/selftests: Refactor common live_test framework Before adding yet another copy of struct live_test and its handler, refactor the existing code into a common framework for live selftests. 
For many live selftests, we want to know if the GPU hung or otherwise misbehaved during the execution of the test (beyond any infraction in the behaviour under test), live_test provides this by comparing the GPU state before and after, alerting if it unexpectedly changed (e.g. the reset counter changed). It also ensures that the GPU is idle before and after the test, so that residual code running on the GPU is flushed before testing. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/selftests/i915_gem_context.c | 103 +++------------------- drivers/gpu/drm/i915/selftests/i915_request.c | 86 +++--------------- drivers/gpu/drm/i915/selftests/igt_live_test.c | 85 ++++++++++++++++++ drivers/gpu/drm/i915/selftests/igt_live_test.h | 35 ++++++++ 5 files changed, 147 insertions(+), 163 deletions(-) create mode 100644 drivers/gpu/drm/i915/selftests/igt_live_test.c create mode 100644 drivers/gpu/drm/i915/selftests/igt_live_test.h (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 611115ed00db..f050759686ca 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -167,6 +167,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/i915_random.o \ selftests/i915_selftest.o \ selftests/igt_flush_test.o \ + selftests/igt_live_test.o \ selftests/igt_reset.o \ selftests/igt_spinner.o diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 4cba50679607..e2c1f0bc2abe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -27,6 +27,7 @@ #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_live_test.h" #include "mock_drm.h" #include "mock_gem_device.h" @@ -34,84 +35,6 @@ #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; - - unsigned int reset_global; - unsigned int reset_engine[I915_NUM_ENGINES]; -}; - -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - t->i915 = i915; - t->func = func; - t->name = name; - - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; - } - - i915->gpu_error.missed_irq_rings = 0; - t->reset_global = i915_reset_count(&i915->gpu_error); - - for_each_engine(engine, i915, id) - t->reset_engine[id] = - i915_reset_engine_count(&i915->gpu_error, engine); - - return 0; -} - -static int end_live_test(struct live_test *t) -{ - struct drm_i915_private *i915 = t->i915; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - return -EIO; - - if (t->reset_global != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_global); - return -EIO; - } - - for_each_engine(engine, i915, id) { - if (t->reset_engine[id] == - i915_reset_engine_count(&i915->gpu_error, engine)) - continue; - - pr_err("%s(%s): engine '%s' was reset %d times!\n", - t->func, t->name, engine->name, - 
i915_reset_engine_count(&i915->gpu_error, engine) - - t->reset_engine[id]); - return -EIO; - } - - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; - } - - return 0; -} - static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; @@ -120,8 +43,8 @@ static int live_nop_switch(void *arg) struct i915_gem_context **ctx; enum intel_engine_id id; intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; - struct live_test t; unsigned long n; int err = -ENODEV; @@ -185,7 +108,7 @@ static int live_nop_switch(void *arg) pr_info("Populated %d contexts on %s in %lluns\n", nctx, engine->name, ktime_to_ns(times[1] - times[0])); - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -233,7 +156,7 @@ static int live_nop_switch(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -554,10 +477,10 @@ static int igt_ctx_exec(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; + struct igt_live_test t; struct drm_file *file; IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct live_test t; int err = -ENODEV; /* @@ -575,7 +498,7 @@ static int igt_ctx_exec(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -645,7 +568,7 @@ static int igt_ctx_exec(void *arg) } out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -660,11 +583,11 @@ static int igt_ctx_readonly(void *arg) struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; unsigned long ndwords, dw; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct live_test t; int err = -ENODEV; /* @@ -679,7 +602,7 @@ static int igt_ctx_readonly(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -757,7 +680,7 @@ static int igt_ctx_readonly(void *arg) } out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -982,10 +905,10 @@ static int igt_vm_isolation(void *arg) struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; intel_wakeref_t wakeref; + struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); unsigned long count; - struct live_test t; unsigned int id; u64 vm_total; int err; @@ -1004,7 +927,7 @@ static int igt_vm_isolation(void *arg) mutex_lock(&i915->drm.struct_mutex); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -1075,7 +998,7 @@ static int igt_vm_isolation(void *arg) out_rpm: intel_runtime_pm_put(i915, wakeref); out_unlock: - if (end_live_test(&t)) + if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 2e14d6d3bad7..4d4b86b5fa11 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -25,6 +25,7 @@ #include #include "../i915_selftest.h" +#include 
"igt_live_test.h" #include "mock_context.h" #include "mock_gem_device.h" @@ -270,73 +271,12 @@ int i915_request_mock_selftests(void) return err; } -struct live_test { - struct drm_i915_private *i915; - const char *func; - const char *name; - - unsigned int reset_count; -}; - -static int begin_live_test(struct live_test *t, - struct drm_i915_private *i915, - const char *func, - const char *name) -{ - int err; - - t->i915 = i915; - t->func = func; - t->name = name; - - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) { - pr_err("%s(%s): failed to idle before, with err=%d!", - func, name, err); - return err; - } - - i915->gpu_error.missed_irq_rings = 0; - t->reset_count = i915_reset_count(&i915->gpu_error); - - return 0; -} - -static int end_live_test(struct live_test *t) -{ - struct drm_i915_private *i915 = t->i915; - - i915_retire_requests(i915); - - if (wait_for(intel_engines_are_idle(i915), 10)) { - pr_err("%s(%s): GPU not idle\n", t->func, t->name); - return -EIO; - } - - if (t->reset_count != i915_reset_count(&i915->gpu_error)) { - pr_err("%s(%s): GPU was reset %d times!\n", - t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_count); - return -EIO; - } - - if (i915->gpu_error.missed_irq_rings) { - pr_err("%s(%s): Missed interrupts on engines %lx\n", - t->func, t->name, i915->gpu_error.missed_irq_rings); - return -EIO; - } - - return 0; -} - static int live_nop_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; intel_wakeref_t wakeref; - struct live_test t; + struct igt_live_test t; unsigned int id; int err = -ENODEV; @@ -354,7 +294,7 @@ static int live_nop_request(void *arg) IGT_TIMEOUT(end_time); ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_unlock; @@ -396,7 +336,7 @@ static int live_nop_request(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_unlock; @@ -483,8 +423,8 @@ static int live_empty_request(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; - struct live_test t; unsigned int id; int err = 0; @@ -508,7 +448,7 @@ static int live_empty_request(void *arg) unsigned long n, prime; ktime_t times[2] = {}; - err = begin_live_test(&t, i915, __func__, engine->name); + err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_batch; @@ -544,7 +484,7 @@ static int live_empty_request(void *arg) break; } - err = end_live_test(&t); + err = igt_live_test_end(&t); if (err) goto out_batch; @@ -643,8 +583,8 @@ static int live_all_engines(void *arg) struct intel_engine_cs *engine; struct i915_request *request[I915_NUM_ENGINES]; intel_wakeref_t wakeref; + struct igt_live_test t; struct i915_vma *batch; - struct live_test t; unsigned int id; int err; @@ -656,7 +596,7 @@ static int live_all_engines(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -728,7 +668,7 @@ static int live_all_engines(void *arg) request[id] = NULL; } - err = end_live_test(&t); + err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) @@ -749,7 +689,7 @@ static int live_sequential_engines(void *arg) struct i915_request *prev = NULL; struct intel_engine_cs *engine; 
intel_wakeref_t wakeref; - struct live_test t; + struct igt_live_test t; unsigned int id; int err; @@ -762,7 +702,7 @@ static int live_sequential_engines(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(i915); - err = begin_live_test(&t, i915, __func__, ""); + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; @@ -845,7 +785,7 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(!i915_request_completed(request[id])); } - err = end_live_test(&t); + err = igt_live_test_end(&t); out_request: for_each_engine(engine, i915, id) { diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c new file mode 100644 index 000000000000..5deb485fb942 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_drv.h" + +#include "../i915_selftest.h" +#include "igt_flush_test.h" +#include "igt_live_test.h" + +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + lockdep_assert_held(&i915->drm.struct_mutex); + + t->i915 = i915; + t->func = func; + t->name = name; + + err = i915_gem_wait_for_idle(i915, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) { + pr_err("%s(%s): failed to idle before, with err=%d!", + func, name, err); + return err; + } + + i915->gpu_error.missed_irq_rings = 0; + t->reset_global = i915_reset_count(&i915->gpu_error); + + for_each_engine(engine, i915, id) + t->reset_engine[id] = + i915_reset_engine_count(&i915->gpu_error, engine); + + return 0; +} + +int igt_live_test_end(struct igt_live_test *t) +{ + struct drm_i915_private *i915 = t->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + return -EIO; + + if (t->reset_global != i915_reset_count(&i915->gpu_error)) { + pr_err("%s(%s): GPU was reset %d times!\n", + t->func, t->name, + i915_reset_count(&i915->gpu_error) - t->reset_global); + return -EIO; + } + + for_each_engine(engine, i915, id) { + if (t->reset_engine[id] == + i915_reset_engine_count(&i915->gpu_error, engine)) + continue; + + pr_err("%s(%s): engine '%s' was reset %d times!\n", + t->func, t->name, engine->name, + i915_reset_engine_count(&i915->gpu_error, engine) - + t->reset_engine[id]); + return -EIO; + } + + if (i915->gpu_error.missed_irq_rings) { + pr_err("%s(%s): Missed interrupts on engines %lx\n", + t->func, t->name, i915->gpu_error.missed_irq_rings); + return -EIO; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h new file mode 100644 index 000000000000..c0e9f99d50de --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef IGT_LIVE_TEST_H +#define IGT_LIVE_TEST_H + +#include "../i915_gem.h" + +struct drm_i915_private; + +struct igt_live_test { + struct drm_i915_private *i915; + const char *func; + const char *name; + + unsigned int reset_global; + unsigned int reset_engine[I915_NUM_ENGINES]; +}; + +/* + * Flush the GPU state before and after the test to ensure that no residual + * code is running on the GPU that may affect this test. 
Also compare the + * state before and after the test and alert if it unexpectedly changes, + * e.g. if the GPU was reset. + */ +int igt_live_test_begin(struct igt_live_test *t, + struct drm_i915_private *i915, + const char *func, + const char *name); +int igt_live_test_end(struct igt_live_test *t); + +#endif /* IGT_LIVE_TEST_H */ -- cgit v1.2.3 From 1579ab2de9147c50b00b35d9e7a0a66c86dffe13 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:21:01 +0000 Subject: drm/i915/selftests: Use common mock_engine::advance Replace the open-coding of advance with a call instead. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-19-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/mock_engine.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 8b8d51af7d6a..442ec2aeec81 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -67,11 +67,10 @@ static struct mock_request *first_request(struct mock_engine *engine) link); } -static void advance(struct mock_engine *engine, - struct mock_request *request) +static void advance(struct mock_request *request) { list_del_init(&request->link); - mock_seqno_advance(&engine->base, request->base.global_seqno); + mock_seqno_advance(request->base.engine, request->base.global_seqno); } static void hw_delay_complete(struct timer_list *t) @@ -84,7 +83,7 @@ static void hw_delay_complete(struct timer_list *t) /* Timer fired, first request is complete */ request = first_request(engine); if (request) - advance(engine, request); + advance(request); /* * Also immediately signal any subsequent 0-delay requests, but @@ -96,7 +95,7 @@ static void hw_delay_complete(struct timer_list *t) break; } - advance(engine, request); + advance(request); } spin_unlock(&engine->hw_lock); @@ -180,7 +179,7 @@ static void mock_submit_request(struct i915_request *request) if (mock->delay) mod_timer(&engine->hw_delay, jiffies + mock->delay); else - advance(engine, mock); + advance(mock); } spin_unlock_irq(&engine->hw_lock); } @@ -240,10 +239,8 @@ void mock_engine_flush(struct intel_engine_cs *engine) del_timer_sync(&mock->hw_delay); spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, link) { - list_del_init(&request->link); - mock_seqno_advance(&mock->base, request->base.global_seqno); - } + list_for_each_entry_safe(request, rn, &mock->hw_queue, link) + advance(request); spin_unlock_irq(&mock->hw_lock); } -- cgit v1.2.3 From 924090f4237bc34f32bb1992bbf334af76e64a29 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:20:50 +0000 Subject: drm/i915: Refactor out intel_context_init() Prior to adding a third instance of intel_context_init() and extending the information stored therewithin, refactor out the common assignments. 
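The shared piece is small; a sketch of the helper this patch factors out (the engine argument is unused for now and only anticipates the third caller mentioned above):

static inline void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	ce->gem_context = ctx;
}

so that both __create_hw_context() and mock_context() reduce to a loop of this shape (the device pointer is named dev_priv in the former, i915 in the latter):

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
		intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);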
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-8-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_context.c | 7 ++----- drivers/gpu/drm/i915/i915_gem_context.h | 8 ++++++++ drivers/gpu/drm/i915/selftests/mock_context.c | 7 ++----- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b68b4345d7be..71d5ca059ee6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -338,11 +338,8 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->i915 = dev_priv; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; - - ce->gem_context = ctx; - } + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) + intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index f6d870b1f73e..47d82ce7ba6a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -364,4 +364,12 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) kref_put(&ctx->ref, i915_gem_context_release); } +static inline void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + ce->gem_context = ctx; +} + #endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index d937bdff26f9..b646cdcdd602 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -45,11 +45,8 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; - - ce->gem_context = ctx; - } + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) + intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]); ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) -- cgit v1.2.3 From 0e21834e18c545bdebed527209a7b6bb8aed9f9b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:21:02 +0000 Subject: drm/i915: Tidy common test_bit probing of i915_request->fence.flags A repeated pattern is to test the signaled bit of our request->fence.flags. Make this an inline to shorten a few lines and remove unnecessary line continuations. 
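The helper and a typical converted call site, as per the hunks below:

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

	...
	if (i915_request_signaled(rq))
		return;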
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-20-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 3 +-- drivers/gpu/drm/i915/i915_request.c | 2 +- drivers/gpu/drm/i915/i915_request.h | 5 +++++ drivers/gpu/drm/i915/intel_breadcrumbs.c | 3 +-- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 3 +-- 7 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1abfc3fa76ad..5fd5080c4ccb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1182,8 +1182,7 @@ static void notify_ring(struct intel_engine_cs *engine) struct i915_request *waiter = wait->request; if (waiter && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &waiter->fence.flags) && + !i915_request_signaled(waiter) && intel_wait_check_request(wait, waiter)) rq = i915_request_get(waiter); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c7ce27785cda..426194ee978a 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -198,7 +198,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, spin_unlock(&engine->timeline.lock); spin_lock(&rq->lock); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + if (!i915_request_signaled(rq)) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) intel_engine_cancel_signaling(rq); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index d014b0605445..c0f084ca4f29 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -280,6 +280,11 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */ #define I915_WAIT_FOR_IDLE_BOOST BIT(4) +static inline bool i915_request_signaled(const struct i915_request *rq) +{ + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags); +} + static inline bool intel_engine_has_started(struct intel_engine_cs *engine, u32 seqno); static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 4fad93fe3678..b58915b8708b 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -631,8 +631,7 @@ static int intel_breadcrumbs_signaler(void *arg) rq->signaling.wait.seqno = 0; __list_del_entry(&rq->signaling.link); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &rq->fence.flags)) { + if (!i915_request_signaled(rq)) { list_add_tail(&rq->signaling.link, &list); i915_request_get(rq); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index f0fa0f767eb6..382a1262f75a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -816,7 +816,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) list_for_each_entry(rq, &engine->timeline.requests, link) { GEM_BUG_ON(!rq->global_seqno); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + if (i915_request_signaled(rq)) continue; dma_fence_set_error(&rq->fence, -EIO); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8b63afa3a221..fdc28a3d2936 100644 --- 
a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6662,7 +6662,7 @@ void gen6_rps_boost(struct i915_request *rq, if (!rps->enabled) return; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + if (i915_request_signaled(rq)) return; /* Serializes with i915_request_retire() */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 26b7274a2d43..e39e483d8d16 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -836,8 +836,7 @@ static void cancel_requests(struct intel_engine_cs *engine) list_for_each_entry(request, &engine->timeline.requests, link) { GEM_BUG_ON(!request->global_seqno); - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &request->fence.flags)) + if (i915_request_signaled(request)) continue; dma_fence_set_error(&request->fence, -EIO); -- cgit v1.2.3 From 25f9cebd7a52ddf15405d74fb5fd4c374f301983 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Jan 2019 22:20:46 +0000 Subject: drm/i915: Show all active engines on hangcheck This turns out to be quite useful if one happens to be debugging semaphore deadlocks. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190121222117.23305-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_hangcheck.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 7dc11fcb13de..741441daae32 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -195,10 +195,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, break; case ENGINE_DEAD: - if (GEM_SHOW_DEBUG()) { - struct drm_printer p = drm_debug_printer("hangcheck"); - intel_engine_dump(engine, &p, "%s\n", engine->name); - } break; default: @@ -285,6 +281,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work) wedged |= intel_engine_flag(engine); } + if (GEM_SHOW_DEBUG() && (hung | stuck)) { + struct drm_printer p = drm_debug_printer("hangcheck"); + + for_each_engine(engine, dev_priv, id) { + if (intel_engine_is_idle(engine)) + continue; + + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + } + if (wedged) { dev_err(dev_priv->drm.dev, "GPU recovery timed out," -- cgit v1.2.3 From 235ca26fc799d78522ea00dc13c54b3c3488151a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 17 Jan 2019 12:55:45 -0800 Subject: drm/i915/psr: Allow PSR2 to be enabled when debugfs asks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For now PSR2 is still disabled by default for all platforms, but it is our intention to let debugfs enable it for debug and test purposes, so intel_psr2_enabled(), which is also used by debugfs to decide whether PSR2 is going to be enabled, needs to take the debug field into consideration.
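The resulting decision in intel_psr2_enabled() looks like the sketch below, mirroring the hunk further down (note the deliberate fall-through from the DEFAULT case):

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;			/* debugfs rules PSR2 out */
	case I915_PSR_DEBUG_DEFAULT:
		if (i915_modparams.enable_psr <= 0)
			return false;		/* default: PSR2 stays off */
	default:
		return crtc_state->has_psr2;	/* debugfs opted in */
	}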
v2: Using the switch/case that intel_psr2_enabled() already had to handle this(DK) Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Reviewed-by: Dhinakaran Pandiyan Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190117205548.28378-1-jose.souza@intel.com --- drivers/gpu/drm/i915/intel_psr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 8dbf26c212cc..84a0fb981561 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -70,17 +70,17 @@ static bool psr_global_enabled(u32 debug) static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { - /* Disable PSR2 by default for all platforms */ - if (i915_modparams.enable_psr == -1) - return false; - /* Cannot enable DSC and PSR2 simultaneously */ WARN_ON(crtc_state->dsc_params.compression_enable && crtc_state->has_psr2); switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; + case I915_PSR_DEBUG_DEFAULT: + if (i915_modparams.enable_psr <= 0) + return false; default: return crtc_state->has_psr2; } -- cgit v1.2.3 From 47c6cd54efde71b0e904cd41593978c109660430 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 17 Jan 2019 12:55:46 -0800 Subject: drm/i915: Refactor PSR status debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The old debugfs fields was not following a naming partern and it was a bit confusing. So it went from: ~$ sudo more /sys/kernel/debug/dri/0/i915_edp_psr_status Sink_Support: yes PSR mode: PSR1 Enabled: yes Busy frontbuffer bits: 0x000 Main link in standby mode: no HW Enabled & Active bit: yes Source PSR status: 0x24050006 [SRDONACK] To: ~$ sudo more /sys/kernel/debug/dri/0/i915_edp_psr_status Sink support: yes [0x03] PSR mode: PSR1 enabled Source PSR ctl: enabled [0x81f00e26] Source PSR status: IDLE [0x04010006] Busy frontbuffer bits: 0x00000000 The 'Main link in standby mode' was removed as it is not useful but if needed by someone the information is still in the register value of 'Source PSR ctl' inside of the brackets, PSR mode and Enabled was squashed into PSR mode, some renames and reorders and we have this cleaner version. This will also make easy to parse debugfs for IGT tests. 
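For reference, each field in the new layout is emitted as a single seq_printf() line with the raw register value kept in brackets, e.g. (taken from the hunks below):

	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);

so a test can match on the human-readable state and still recover the exact hardware bits.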
v2: Printing sink PSR version with only 2 hex digits as it is a byte Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Suggested-by: Dhinakaran Pandiyan Reviewed-by: Dhinakaran Pandiyan Acked-by: Rodrigo Vivi Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190117205548.28378-2-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 98 +++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 24d6d4ce14ef..fb59874fed99 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2506,7 +2506,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); static void psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) { - u32 val, psr_status; + u32 val, status_val; + const char *status = "unknown"; if (dev_priv->psr.psr2_enabled) { static const char * const live_status[] = { @@ -2522,14 +2523,11 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "BUF_ON", "TG_ON" }; - psr_status = I915_READ(EDP_PSR2_STATUS); - val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) { - seq_printf(m, "Source PSR status: 0x%x [%s]\n", - psr_status, live_status[val]); - return; - } + val = I915_READ(EDP_PSR2_STATUS); + status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; } else { static const char * const live_status[] = { "IDLE", @@ -2541,75 +2539,79 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - psr_status = I915_READ(EDP_PSR_STATUS); - val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >> - EDP_PSR_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) { - seq_printf(m, "Source PSR status: 0x%x [%s]\n", - psr_status, live_status[val]); - return; - } + val = I915_READ(EDP_PSR_STATUS); + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; } - seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown"); + seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); } static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct i915_psr *psr = &dev_priv->psr; intel_wakeref_t wakeref; - u32 psrperf = 0; - bool enabled = false; - bool sink_support; + const char *status; + bool enabled; + u32 val; if (!HAS_PSR(dev_priv)) return -ENODEV; - sink_support = dev_priv->psr.sink_support; - seq_printf(m, "Sink_Support: %s\n", yesno(sink_support)); - if (!sink_support) + seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); + if (psr->dp) + seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); + seq_puts(m, "\n"); + + if (!psr->sink_support) return 0; wakeref = intel_runtime_pm_get(dev_priv); + mutex_lock(&psr->lock); - mutex_lock(&dev_priv->psr.lock); - seq_printf(m, "PSR mode: %s\n", - dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1"); - seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); - seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", - dev_priv->psr.busy_frontbuffer_bits); - - if (dev_priv->psr.psr2_enabled) - enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; + if (psr->enabled) + status = psr->psr2_enabled ? 
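Given the macros added below, each 32-bit PSR2_SU_STATUS register packs three 10-bit block counts, so frame N lives in register N/3 at bit offset (N % 3) * 10; an illustrative decode for one past frame:

	u32 val = I915_READ(PSR2_SU_STATUS(frame));
	u32 su_blocks = (val & PSR2_SU_STATUS_MASK(frame)) >>
			PSR2_SU_STATUS_SHIFT(frame);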
"PSR2 enabled" : "PSR1 enabled"; else - enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; + status = "disabled"; + seq_printf(m, "PSR mode: %s\n", status); - seq_printf(m, "Main link in standby mode: %s\n", - yesno(dev_priv->psr.link_standby)); + if (!psr->enabled) + goto unlock; - seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); + if (psr->psr2_enabled) { + val = I915_READ(EDP_PSR2_CTL); + enabled = val & EDP_PSR2_ENABLE; + } else { + val = I915_READ(EDP_PSR_CTL); + enabled = val & EDP_PSR_ENABLE; + } + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", + enableddisabled(enabled), val); + psr_source_status(dev_priv, m); + seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", + psr->busy_frontbuffer_bits); /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - psrperf = I915_READ(EDP_PSR_PERF_CNT) & - EDP_PSR_PERF_CNT_MASK; - - seq_printf(m, "Performance_Counter: %u\n", psrperf); + val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance counter: %u\n", val); } - psr_source_status(dev_priv, m); - mutex_unlock(&dev_priv->psr.lock); - - if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) { + if (psr->debug & I915_PSR_DEBUG_IRQ) { seq_printf(m, "Last attempted entry at: %lld\n", - dev_priv->psr.last_entry_attempt); - seq_printf(m, "Last exit at: %lld\n", - dev_priv->psr.last_exit); + psr->last_entry_attempt); + seq_printf(m, "Last exit at: %lld\n", psr->last_exit); } +unlock: + mutex_unlock(&psr->lock); intel_runtime_pm_put(dev_priv, wakeref); + return 0; } -- cgit v1.2.3 From cc8853f57e00511f46386c8e7910e00c5b2e58ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 17 Jan 2019 12:55:47 -0800 Subject: drm/i915: Add PSR2 selective update status registers and bits definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This register contains how many blocks was sent in the past selective updates. Those registers are not kept set all the times but polling it after flip can show the values corresponding to the last 8 frames. 
v2: Improved macros(Dhinakaran) Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Reviewed-by: Dhinakaran Pandiyan Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190117205548.28378-3-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 93cbd057c07a..f4e447437d75 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4272,6 +4272,15 @@ enum { #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) #define EDP_PSR2_STATUS_STATE_SHIFT 28 +#define _PSR2_SU_STATUS_0 0x6F914 +#define _PSR2_SU_STATUS_1 0x6F918 +#define _PSR2_SU_STATUS_2 0x6F91C +#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1)) +#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3)) +#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) +#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) +#define PSR2_SU_STATUS_FRAMES 8 + /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) -- cgit v1.2.3 From a81f781a32384aef7e5b58854112881674c59e9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= Date: Thu, 17 Jan 2019 12:55:48 -0800 Subject: drm/i915/debugfs: Print PSR selective update status register values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The value of this registers will be used to test if PSR2 is doing selective update and if the number of blocks match with the expected. v2: - Using new macros - Changed the string output v3: - reading PSR2_SU_STATUS registers together(Dhinakaran) - printing SU blocks of frames with 0 updates(Dhinakaran) Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Reviewed-by: Dhinakaran Pandiyan Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190117205548.28378-4-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fb59874fed99..9a9e1da496dc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2608,6 +2608,29 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Last exit at: %lld\n", psr->last_exit); } + if (psr->psr2_enabled) { + u32 su_frames_val[3]; + int frame; + + /* + * Reading all 3 registers before hand to minimize crossing a + * frame boundary between register reads + */ + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) + su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame)); + + seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); + + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { + u32 su_blocks; + + su_blocks = su_frames_val[frame / 3] & + PSR2_SU_STATUS_MASK(frame); + su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); + seq_printf(m, "%d\t%d\n", frame, su_blocks); + } + } + unlock: mutex_unlock(&psr->lock); intel_runtime_pm_put(dev_priv, wakeref); -- cgit v1.2.3 From 2e679d48f38c378650db403b4ba2248adf0691b2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 21 Jan 2019 11:51:41 +0200 Subject: drm/i915/gvt: switch to kernel types Mixed C99 and kernel types use is getting ugly. Prefer kernel types. 
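Purely as an illustration (userspace sketch, not kernel code): the kernel types that the sed expression below substitutes are typedefs with the same widths as their C99 counterparts (in-tree they come from <linux/types.h>), so the conversion is cosmetic and has no functional effect:

#include <assert.h>
#include <stdint.h>

typedef uint8_t  u8;	/* stand-ins for the <linux/types.h> typedefs */
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

int main(void)
{
	assert(sizeof(u8) == 1 && sizeof(u16) == 2);
	assert(sizeof(u32) == 4 && sizeof(u64) == 8);
	return 0;
}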
sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' Acked-by: Zhenyu Wang Signed-off-by: Jani Nikula Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 14 +++++++------- drivers/gpu/drm/i915/gvt/handlers.c | 6 +++--- drivers/gpu/drm/i915/gvt/kvmgt.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/gvt/mmio.c | 6 +++--- drivers/gpu/drm/i915/gvt/sched_policy.c | 2 +- drivers/gpu/drm/i915/gvt/scheduler.h | 2 +- 6 files changed, 27 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a04e8aa58547..35b4ec3f7618 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -399,10 +399,10 @@ struct cmd_info { #define R_VECS (1 << VECS) #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) /* rings that support this cmd: BLT/RCS/VCS/VECS */ - uint16_t rings; + u16 rings; /* devices that support this cmd: SNB/IVB/HSW/... */ - uint16_t devices; + u16 devices; /* which DWords are address that need fix up. * bit 0 means a 32-bit non address operand in command @@ -412,13 +412,13 @@ struct cmd_info { * No matter the address length, each address only takes * one bit in the bitmap. */ - uint16_t addr_bitmap; + u16 addr_bitmap; /* flag == F_LEN_CONST : command length * flag == F_LEN_VAR : length bias bits * Note: length is in DWord */ - uint8_t len; + u8 len; parser_cmd_handler handler; }; @@ -1639,7 +1639,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size) { unsigned long gma = 0; const struct cmd_info *info; - uint32_t cmd_len = 0; + u32 cmd_len = 0; bool bb_end = false; struct intel_vgpu *vgpu = s->vgpu; u32 cmd; @@ -2678,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) I915_GTT_PAGE_SIZE))) return -EINVAL; - ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t); + ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32); ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES, PAGE_SIZE); gma_head = wa_ctx->indirect_ctx.guest_gma; @@ -2845,7 +2845,7 @@ put_obj: static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; + u32 per_ctx_start[CACHELINE_DWORDS] = {0}; unsigned char *bb_start_sva; if (!wa_ctx->per_ctx.valid) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 68a62ba5bf54..9c106e47e640 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -278,7 +278,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 old, new; - uint32_t ack_reg_offset; + u32 ack_reg_offset; old = vgpu_vreg(vgpu, offset); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); @@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value, } static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd, - uint8_t t) + u8 t) { if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { /* training pattern 1 for CR */ @@ -917,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, if (op == GVT_AUX_NATIVE_WRITE) { int t; - uint8_t buf[16]; + u8 buf[16]; if ((addr + len + 1) >= DPCD_SIZE) { /* diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a19e684e621a..f8d44e8f86a6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -712,7 +712,7 @@ static void intel_vgpu_release_work(struct 
work_struct *work) __intel_vgpu_release(vgpu); } -static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) +static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) { u32 start_lo, start_hi; u32 mem_type; @@ -739,10 +739,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) return ((u64)start_hi << 32) | start_lo; } -static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off, +static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, void *buf, unsigned int count, bool is_write) { - uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar); + u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar); int ret; if (is_write) @@ -754,13 +754,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off, return ret; } -static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off) +static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) { return off >= vgpu_aperture_offset(vgpu) && off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu); } -static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off, +static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, void *buf, unsigned long count, bool is_write) { void *aperture_va; @@ -792,7 +792,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); - uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; @@ -1038,7 +1038,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type) static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, - unsigned int count, uint32_t flags, + unsigned int count, u32 flags, void *data) { return 0; @@ -1046,21 +1046,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu, static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, - unsigned int count, uint32_t flags, void *data) + unsigned int count, u32 flags, void *data) { return 0; } static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, - uint32_t flags, void *data) + u32 flags, void *data) { return 0; } static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, - uint32_t flags, void *data) + u32 flags, void *data) { struct eventfd_ctx *trigger; @@ -1079,12 +1079,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, return 0; } -static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags, +static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, unsigned int index, unsigned int start, unsigned int count, void *data) { int (*func)(struct intel_vgpu *vgpu, unsigned int index, - unsigned int start, unsigned int count, uint32_t flags, + unsigned int start, unsigned int count, u32 flags, void *data) = NULL; switch (index) { diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 43f65848ecd6..ed4df2f6d60b 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) (reg >= gvt->device_info.gtt_start_offset \ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) -static void failsafe_emulate_mmio_rw(struct intel_vgpu 
*vgpu, uint64_t pa, +static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes, bool read) { struct intel_gvt *gvt = NULL; @@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; @@ -171,7 +171,7 @@ out: * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index c32e7d5e8629..951cfee85902 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; - static uint64_t stage_check; + static u64 stage_check; int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ca5529d0e48e..1e9eec6a32fe 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -61,7 +61,7 @@ struct shadow_indirect_ctx { unsigned long guest_gma; unsigned long shadow_gma; void *shadow_va; - uint32_t size; + u32 size; }; #define PER_CTX_ADDR_MASK 0xfffff000 -- cgit v1.2.3 From a9dc3395fc8bc460761f853b71971bdc1671560f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 18 Jan 2019 14:01:18 +0200 Subject: drm/i915/sdvo: switch to kernel types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mixed C99 and kernel types use is getting ugly. Prefer kernel types. sed -i 's/\buint\(8\|16\|32\|64\)_t\b/u\1/g' v2: rebase Acked-by: Chris Wilson Acked-by: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190118120125.15484-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_sdvo.c | 78 +++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index df2d830a7405..e7b0884ba5a5 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -76,7 +76,7 @@ struct intel_sdvo { i915_reg_t sdvo_reg; /* Active outputs controlled by this SDVO output */ - uint16_t controlled_output; + u16 controlled_output; /* * Capabilities of the SDVO device returned by @@ -91,12 +91,12 @@ struct intel_sdvo { * For multiple function SDVO device, * this is for current attached outputs. 
*/ - uint16_t attached_output; + u16 attached_output; /* * Hotplug activation bits for this device */ - uint16_t hotplug_active; + u16 hotplug_active; enum port port; @@ -104,19 +104,19 @@ struct intel_sdvo { bool has_hdmi_audio; /* DDC bus used by this SDVO encoder */ - uint8_t ddc_bus; + u8 ddc_bus; /* * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd */ - uint8_t dtd_sdvo_flags; + u8 dtd_sdvo_flags; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ - uint16_t output_flag; + u16 output_flag; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; @@ -184,7 +184,7 @@ to_intel_sdvo_connector(struct drm_connector *connector) container_of((conn_state), struct intel_sdvo_connector_state, base.base) static bool -intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, @@ -746,9 +746,9 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, - uint16_t clock, - uint16_t width, - uint16_t height) + u16 clock, + u16 width, + u16 height) { struct intel_sdvo_preferred_input_timing_args args; @@ -791,9 +791,9 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, const struct drm_display_mode *mode) { - uint16_t width, height; - uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; - uint16_t h_sync_offset, v_sync_offset; + u16 width, height; + u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; + u16 h_sync_offset, v_sync_offset; int mode_clock; memset(dtd, 0, sizeof(*dtd)); @@ -898,13 +898,13 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, - uint8_t mode) + u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, - uint8_t mode) + u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } @@ -913,11 +913,11 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; - uint8_t set_buf_index[2]; - uint8_t av_split; - uint8_t buf_size; - uint8_t buf[48]; - uint8_t *pos; + u8 set_buf_index[2]; + u8 av_split; + u8 buf_size; + u8 buf[48]; + u8 *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); @@ -940,11 +940,11 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) #endif static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, - unsigned if_index, uint8_t tx_rate, - const uint8_t *data, unsigned length) + unsigned int if_index, u8 tx_rate, + const u8 *data, unsigned int length) { - uint8_t set_buf_index[2] = { if_index, 0 }; - uint8_t hbuf_size, tmp[8]; + u8 set_buf_index[2] = { if_index, 0 }; + u8 hbuf_size, tmp[8]; int i; if (!intel_sdvo_set_value(intel_sdvo, @@ -984,7 +984,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, { const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + u8 
sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; union hdmi_infoframe frame; int ret; ssize_t len; @@ -1017,7 +1017,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state) { struct intel_sdvo_tv_format format; - uint32_t format_map; + u32 format_map; format_map = 1 << conn_state->tv.mode; memset(&format, 0, sizeof(format)); @@ -1208,7 +1208,7 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state = &sdvo_state->base.base; struct intel_sdvo_connector *intel_sdvo_conn = to_intel_sdvo_connector(conn_state->connector); - uint16_t val; + u16 val; if (intel_sdvo_conn->left) UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H); @@ -1692,10 +1692,10 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) +static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); - uint16_t hotplug; + u16 hotplug; if (!I915_HAS_HOTPLUG(dev_priv)) return 0; @@ -1826,7 +1826,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { - uint16_t response; + u16 response; struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; @@ -1977,7 +1977,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; - uint32_t reply = 0, format_map = 0; + u32 reply = 0, format_map = 0; int i; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -2062,7 +2062,7 @@ static int intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, - uint64_t *val) + u64 *val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state); @@ -2121,7 +2121,7 @@ static int intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, - uint64_t val) + u64 val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); @@ -2270,7 +2270,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) { - uint16_t mask = 0; + u16 mask = 0; unsigned int num_bits; /* @@ -2671,7 +2671,7 @@ err: } static bool -intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ @@ -2747,7 +2747,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, { struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; - uint32_t format_map, i; + u32 format_map, i; if (!intel_sdvo_set_target_output(intel_sdvo, type)) return false; @@ -2814,7 +2814,7 @@ 
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); - uint16_t response, data_value[2]; + u16 response, data_value[2]; /* when horizontal overscan is supported, Add the left/right property */ if (enhancements.overscan_h) { @@ -2925,7 +2925,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; - uint16_t response, data_value[2]; + u16 response, data_value[2]; ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS); @@ -2939,7 +2939,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, { union { struct intel_sdvo_enhancements_reply reply; - uint16_t response; + u16 response; } enhancements; BUILD_BUG_ON(sizeof(enhancements) != 2); -- cgit v1.2.3 From c25f0c6a0426527134d992bb4782cf5abdf962b6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 22 Jan 2019 18:32:27 -0800 Subject: drm/i915/icl: do a posting read after irq install When reading GEN11_GT_INTR_DWx closely after enabling the interrupts in gen11_irq_postinstall, the returned value is garbage. This can cause other parts of the setup code (e.g. gen11_reset_one_iir) to think that there are interrupts to be cleared when there are none. The garbage value is only seen on the first read done after the enable, so this looks like a posting issue. Adding a posting read after enabling the interrupts does indeed fix the problem. Note that the posting read has been purposely added outside of gen11_master_intr_enable since the issue has only been observed when the full interrupt setup is performed. Cc: Mika Kuoppala Signed-off-by: Daniele Ceraolo Spurio Acked-by: Chris Wilson Acked-by: Mika Kuoppala Signed-off-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190123023227.8117-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5fd5080c4ccb..7056ae2d1e0e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4089,6 +4089,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); gen11_master_intr_enable(dev_priv->regs); + POSTING_READ(GEN11_GFX_MSTR_IRQ); return 0; } -- cgit v1.2.3 From 6e062b60b0b1bd82cac475e63cdb8c451647182b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 23 Jan 2019 13:51:55 +0000 Subject: drm/i915/execlists: Mark up priority boost on preemption Record the priority boost we giving to the preempted client or else we may end up in a situation where the priority queue no longer matches the request priority order and so we can end up in an infinite loop of preempting the same pair of requests. 
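The failure mode described above boils down to an invariant: a request's recorded priority must match the priority list it is queued on. A simplified standalone sketch (not the driver code; the names and the boost value are illustrative) of why skipping the update keeps re-triggering preemption:

#include <stdbool.h>
#include <stdio.h>

struct request {
	int queued_prio;	/* priority of the list the request sits on */
	int recorded_prio;	/* stands in for sched.attr.priority */
};

static bool need_preempt(const struct request *active, int incoming_prio)
{
	/* preempt whenever the incoming priority beats the recorded one */
	return incoming_prio > active->recorded_prio;
}

int main(void)
{
	struct request active = { .queued_prio = 0, .recorded_prio = 0 };
	int boosted = 1;	/* stands in for I915_PRIORITY_NEWCLIENT */

	/* boost applied to the queue placement only: invariant broken */
	active.queued_prio = boosted;
	printf("boost not recorded: need_preempt=%d (fires again and again)\n",
	       need_preempt(&active, boosted));

	/* the fix: record the boost on the request as well */
	active.recorded_prio = boosted;
	printf("boost recorded:     need_preempt=%d\n",
	       need_preempt(&active, boosted));

	return 0;
}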
Fixes: e9eaf82d97a2 ("drm/i915: Priority boost for waiting clients") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190123135155.21562-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 436e59724900..8aa8a4862543 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -302,6 +302,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) */ if (!(prio & I915_PRIORITY_NEWCLIENT)) { prio |= I915_PRIORITY_NEWCLIENT; + active->sched.attr.priority = prio; list_move_tail(&active->sched.link, i915_sched_lookup_priolist(engine, prio)); } @@ -625,6 +626,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { + GEM_BUG_ON(last && + need_preempt(engine, last, rq_prio(rq))); + /* * Can we combine this request with the current port? * It has to be the same context/ringbuffer and not -- cgit v1.2.3 From 3c8861d84a4d2c6cd7221d18e49bf9201c6c6115 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 17 Dec 2018 14:44:14 -0800 Subject: drm: Add color management LUT validation helper (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some hardware may place additional restrictions on the gamma/degamma curves described by our LUT properties. E.g., that a gamma curve never decreases or that the red/green/blue channels of a LUT's entries must be equal. Let's add a helper function that drivers can use to test that a userspace-provided LUT is valid and doesn't violate hardware requirements. v2: - Combine into a single helper that just takes a bitmask of the tests to apply. (Brian Starkey) - Add additional check (always performed) that LUT property blob size is always a multiple of the LUT entry size. (stolen from ARM driver) v3: - Drop the LUT size check again since drm_atomic_replace_property_blob_from_id() already covers this for us. (Alexandru Gheorghe) v4: - Use an enum to describe possible test values rather than #define's; this is cleaner to provide kerneldoc for. (Daniel Vetter) - s/DRM_COLOR_LUT_INCREASING/DRM_COLOR_LUT_NON_DECREASING/. (Ville) Cc: Uma Shankar Cc: Swati Sharma Cc: Brian Starkey Cc: Daniel Vetter Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Brian Starkey Reviewed-by: Alexandru Gheorghe Reviewed-by: Uma Shankar Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20181217224415.12848-1-matthew.d.roper@intel.com --- drivers/gpu/drm/drm_color_mgmt.c | 44 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 07dcf47daafe..968ca7c91ad8 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -462,3 +462,47 @@ int drm_plane_create_color_properties(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_color_properties); + +/** + * drm_color_lut_check - check validity of lookup table + * @lut: property blob containing LUT to check + * @tests: bitmask of tests to run + * + * Helper to check whether a userspace-provided lookup table is valid and + * satisfies hardware requirements. Drivers pass a bitmask indicating which of + * the tests in &drm_color_lut_tests should be performed. 
+ * + * Returns 0 on success, -EINVAL on failure. + */ +int drm_color_lut_check(struct drm_property_blob *lut, + uint32_t tests) +{ + struct drm_color_lut *entry; + int i; + + if (!lut || !tests) + return 0; + + entry = lut->data; + for (i = 0; i < drm_color_lut_size(lut); i++) { + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { + if (entry[i].red != entry[i].blue || + entry[i].red != entry[i].green) { + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); + return -EINVAL; + } + } + + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { + if (entry[i].red < entry[i - 1].red || + entry[i].green < entry[i - 1].green || + entry[i].blue < entry[i - 1].blue) { + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_color_lut_check); -- cgit v1.2.3 From 85e2d61e49768f85346fe7ff133625eabf0946fe Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 18 Dec 2018 09:51:58 -0800 Subject: drm/i915: Validate userspace-provided color management LUT's (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We currently program userspace-provided gamma and degamma LUT's into our hardware without really checking to see whether they satisfy our hardware's rules. We should try to catch tables that are invalid for our hardware early and reject the atomic transaction. All of our platforms that accept a degamma LUT expect that the entries in the LUT are always flat or increasing, never decreasing. Also, our GLK and ICL platforms only accept degamma tables with r=g=b entries; so we should also add the relevant checks for that in anticipation of degamma support landing for those platforms. v2: - Use new API (single check function with bitmask of tests to apply) - Call helper for our gamma table as well (with no additional tests specified) so that the table size will be validated. v3: - Don't call on the gamma table since the LUT size is already tested at property blob upload and we don't have any additional hardware constraints for that LUT. v4: - Apply equal color channel check on gen10 as well; the bspec has some strange tagging for CNL platforms, but this appears to apply there as well. (Ville) Cc: Uma Shankar Cc: Swati Sharma Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20181218175158.5739-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/intel_color.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 299eb7858adc..bc7589656a8f 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -609,10 +609,26 @@ int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); size_t gamma_length, degamma_length; + uint32_t tests = DRM_COLOR_LUT_NON_DECREASING; degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size; gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size; + /* + * All of our platforms mandate that the degamma curve be + * non-decreasing. Additionally, GLK and gen11 only accept a single + * value for red, green, and blue in the degamma table. Make sure + * userspace didn't try to pass us something we can't handle. + * + * We don't have any extra hardware constraints on the gamma table, + * so no need to explicitly check it. 
+ */ + if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) + tests |= DRM_COLOR_LUT_EQUAL_CHANNELS; + + if (drm_color_lut_check(crtc_state->base.degamma_lut, tests) != 0) + return -EINVAL; + /* * We allow both degamma & gamma luts at the right size or * NULL. -- cgit v1.2.3 From 63cb4e641af1e81796a9e45c10a17a88742b5a2d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:01 +0200 Subject: drm/i915/crt: split out intel_crt_present() to platform specific setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With new platforms not having CRT support and most conditions in intel_crt_present() being specific to DDI, split out the CRT initialization to platform specific blocks in the if ladder. Add new Pineview block for this. This puts intel_crt_init() more in line with the rest of the outputs, and makes it slightly easier for the uninitiated to figure out which platforms actually have what. v2: keep gen >= 9 check in intel_ddi_crt_present() (Ville) Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2fa9f4aec08e..9e0f34524d0b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14245,7 +14245,7 @@ static bool has_edp_a(struct drm_i915_private *dev_priv) return true; } -static bool intel_crt_present(struct drm_i915_private *dev_priv) +static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) >= 9) return false; @@ -14253,15 +14253,12 @@ static bool intel_crt_present(struct drm_i915_private *dev_priv) if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) return false; - if (IS_CHERRYVIEW(dev_priv)) - return false; - if (HAS_PCH_LPT_H(dev_priv) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) return false; /* DDI E can't be used if DDI A requires 4 lanes */ - if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; if (!dev_priv->vbt.int_crt_support) @@ -14323,9 +14320,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) */ intel_lvds_init(dev_priv); - if (intel_crt_present(dev_priv)) - intel_crt_init(dev_priv); - if (IS_ICELAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); @@ -14354,6 +14348,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (HAS_DDI(dev_priv)) { int found; + if (intel_ddi_crt_present(dev_priv)) + intel_crt_init(dev_priv); + /* * Haswell uses DDI functions to detect digital outputs. 
* On SKL pre-D0 the strap isn't connected, so we assume @@ -14385,6 +14382,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (HAS_PCH_SPLIT(dev_priv)) { int found; + + if (dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); if (has_edp_a(dev_priv)) @@ -14413,6 +14414,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; + if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC @@ -14455,9 +14459,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } vlv_dsi_init(dev_priv); - } else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) { + } else if (IS_PINEVIEW(dev_priv)) { + if (dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { bool found = false; + if (dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); @@ -14489,8 +14499,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); - } else if (IS_GEN(dev_priv, 2)) + } else if (IS_GEN(dev_priv, 2)) { + if (dev_priv->vbt.int_crt_support) + intel_crt_init(dev_priv); + intel_dvo_init(dev_priv); + } if (SUPPORTS_TV(dev_priv)) intel_tv_init(dev_priv); -- cgit v1.2.3 From 0fafa22692773e950182cba48cd886fe93ec14c8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:02 +0200 Subject: drm/i915/lvds: only call intel_lvds_init() on platforms that might have LVDS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With new platforms not having LVDS support, only call intel_lvds_init() on platforms that might actually have LVDS. Move the comment about eDP init to the PCH block where it's relevant. This puts intel_lvds_init() more in line with the rest of the outputs, and makes it slightly easier for the uninitiated to figure out which platforms actually have what. Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9e0f34524d0b..29a7dd4afe0e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14313,13 +14313,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - /* - * intel_edp_init_connector() depends on this completing first, to - * prevent the registeration of both eDP and LVDS and the incorrect - * sharing of the PPS. 
- */ - intel_lvds_init(dev_priv); - if (IS_ICELAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); @@ -14383,6 +14376,13 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (HAS_PCH_SPLIT(dev_priv)) { int found; + /* + * intel_edp_init_connector() depends on this completing first, + * to prevent the registration of both eDP and LVDS and the + * incorrect sharing of the PPS. + */ + intel_lvds_init(dev_priv); + if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); @@ -14460,11 +14460,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) vlv_dsi_init(dev_priv); } else if (IS_PINEVIEW(dev_priv)) { + intel_lvds_init(dev_priv); + if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { bool found = false; + intel_lvds_init(dev_priv); + if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); @@ -14500,6 +14504,8 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); } else if (IS_GEN(dev_priv, 2)) { + intel_lvds_init(dev_priv); + if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); -- cgit v1.2.3 From 9bedc7edf624a6c552d9d382712a56a705f40a41 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:03 +0200 Subject: drm/i915/lvds: nuke intel_lvds_supported() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that intel_lvds_init() is only called for platforms that might have LVDS, move the remaining checks to intel_setup_outputs(), again similar to other outputs, and remove the overlapping checks. Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 6 ++++-- drivers/gpu/drm/i915/intel_lvds.c | 23 ----------------------- 2 files changed, 4 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 29a7dd4afe0e..db0f15242ccf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14467,7 +14467,8 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { bool found = false; - intel_lvds_init(dev_priv); + if (IS_MOBILE(dev_priv)) + intel_lvds_init(dev_priv); if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); @@ -14504,7 +14505,8 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); } else if (IS_GEN(dev_priv, 2)) { - intel_lvds_init(dev_priv); + if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) + intel_lvds_init(dev_priv); if (dev_priv->vbt.int_crt_support) intel_crt_init(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 46a5dfd5cdf7..815ed463d9c5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -798,26 +798,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; } -static bool intel_lvds_supported(struct drm_i915_private *dev_priv) -{ - /* - * With the introduction of the PCH we gained a dedicated - * LVDS presence pin, use it. 
- */ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - return true; - - /* - * Otherwise LVDS was only attached to mobile products, - * except for the inglorious 830gm - */ - if (INTEL_GEN(dev_priv) <= 4 && - IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) - return true; - - return false; -} - /** * intel_lvds_init - setup LVDS connectors on this device * @dev_priv: i915 device @@ -842,9 +822,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) u8 pin; u32 allowed_scalers; - if (!intel_lvds_supported(dev_priv)) - return; - /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { WARN(!dev_priv->vbt.int_lvds_support, -- cgit v1.2.3 From d6521463897bc673ed929e87a884b7a6ef641f64 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:04 +0200 Subject: drm/i915/tv: only call intel_tv_init() on platforms that might have TV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With most platforms not having TV support, only call intel_tv_init() on platforms that might actually have TV, specifically gens 3 and 4. This puts intel_tv_init() more in line with the rest of the outputs, and makes it slightly easier for the uninitiated to figure out which platforms actually have what. Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index db0f15242ccf..8e89f04ddd9c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14504,6 +14504,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); + + if (SUPPORTS_TV(dev_priv)) + intel_tv_init(dev_priv); } else if (IS_GEN(dev_priv, 2)) { if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) intel_lvds_init(dev_priv); @@ -14514,9 +14517,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_dvo_init(dev_priv); } - if (SUPPORTS_TV(dev_priv)) - intel_tv_init(dev_priv); - intel_psr_init(dev_priv); for_each_intel_encoder(&dev_priv->drm, encoder) { -- cgit v1.2.3 From a5916fd7a1f151a7890329787fa703a9215837f2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:05 +0200 Subject: drm/i915: rename has_edp_a() to ilk_has_edp_a() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clarify that the name is specific to ILK+ PCH platforms. 
v2: prefix the name with ilk rather than pch (Ville) Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-5-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8e89f04ddd9c..9895ea566f99 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14231,7 +14231,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) return index_mask; } -static bool has_edp_a(struct drm_i915_private *dev_priv) +static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) { if (!IS_MOBILE(dev_priv)) return false; @@ -14388,7 +14388,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); - if (has_edp_a(dev_priv)) + if (ilk_has_edp_a(dev_priv)) intel_dp_init(dev_priv, DP_A, PORT_A); if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { -- cgit v1.2.3 From 346073cee660bbbede71244b64ea34f66f21c36e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:06 +0200 Subject: drm/i915/lvds: simplify gen 2 lvds presence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen 2 mobile and not I830 is, in fact, I85X. Simplify. Suggested-by: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-6-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9895ea566f99..ed3780f24638 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14508,7 +14508,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (SUPPORTS_TV(dev_priv)) intel_tv_init(dev_priv); } else if (IS_GEN(dev_priv, 2)) { - if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) + if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); if (dev_priv->vbt.int_crt_support) -- cgit v1.2.3 From 74d021eaa70a1add287a5c65ba0fbc34606b8484 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 22 Jan 2019 10:23:07 +0200 Subject: drm/i915/crt: simplify CRT VBT check on pre-VLV/DDI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The VBT int_crt_support can't be trusted on earlier platforms, and is always set to true in intel_bios.c for pre-DDI and pre-VLV platforms. We can simplify the output setup by unconditionally calling intel_crt_init() for these platforms. Suggested-by: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190122082307.4003-7-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_display.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3780f24638..d328599240cb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14382,9 +14382,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * incorrect sharing of the PPS. 
*/ intel_lvds_init(dev_priv); - - if (dev_priv->vbt.int_crt_support) - intel_crt_init(dev_priv); + intel_crt_init(dev_priv); dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); @@ -14461,17 +14459,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) vlv_dsi_init(dev_priv); } else if (IS_PINEVIEW(dev_priv)) { intel_lvds_init(dev_priv); - - if (dev_priv->vbt.int_crt_support) - intel_crt_init(dev_priv); + intel_crt_init(dev_priv); } else if (IS_GEN_RANGE(dev_priv, 3, 4)) { bool found = false; if (IS_MOBILE(dev_priv)) intel_lvds_init(dev_priv); - if (dev_priv->vbt.int_crt_support) - intel_crt_init(dev_priv); + intel_crt_init(dev_priv); if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); @@ -14511,9 +14506,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); - if (dev_priv->vbt.int_crt_support) - intel_crt_init(dev_priv); - + intel_crt_init(dev_priv); intel_dvo_init(dev_priv); } -- cgit v1.2.3 From f6626e1d96ed33f16d5a8ca5df4bbc2b374738ee Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Jan 2019 08:37:10 +0000 Subject: drm/i915: De-inline intel_context_init() Nip some inline spaghetti in the bud before the problem gets too bad. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190124083710.7033-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_context.c | 8 ++++++++ drivers/gpu/drm/i915/i915_gem_context.h | 10 +++------- 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index fae68c4c4683..93e84751370f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -321,6 +321,14 @@ static u32 default_desc_template(const struct drm_i915_private *i915, return desc; } +void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + ce->gem_context = ctx; +} + static struct i915_gem_context * __create_hw_context(struct drm_i915_private *dev_priv, struct drm_i915_file_private *file_priv) diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 47d82ce7ba6a..3769438228f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -364,12 +364,8 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) kref_put(&ctx->ref, i915_gem_context_release); } -static inline void -intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - ce->gem_context = ctx; -} +void intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine); #endif /* !__I915_GEM_CONTEXT_H__ */ -- cgit v1.2.3 From 63a23d245b2cb094be99d274204e458eb2810410 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 8 Jan 2019 17:08:38 +0100 Subject: drm/i915/backlight: Restore backlight on resume, v3. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Restore our saved values for backlight. This way even with fastset on S4 resume we will correctly restore the backlight to the active values. Changes since v1: - Call enable_backlight() when backlight.level is set. On suspend backlight.enabled is always cleared, this makes it not a good indicator. 
Also check for crtc->state->active. Changes since v2: - Use the new update_pipe() callback to run this on resume as well. Signed-off-by: Maarten Lankhorst Cc: Tolga Cakir Cc: Basil Eric Rabi Cc: Hans de Goede Cc: Ville Syrjälä Reported-by: Ville Syrjälä Signed-off-by: Maarten Lankhorst Reviewed-by: Hans de Goede Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190108160842.13396-1-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/icl_dsi.c | 1 + drivers/gpu/drm/i915/intel_ddi.c | 2 ++ drivers/gpu/drm/i915/intel_dp.c | 1 + drivers/gpu/drm/i915/intel_drv.h | 3 +++ drivers/gpu/drm/i915/intel_lvds.c | 1 + drivers/gpu/drm/i915/intel_panel.c | 49 +++++++++++++++++++++++++++++--------- drivers/gpu/drm/i915/vlv_dsi.c | 1 + 7 files changed, 47 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 355b48d1c937..73a7bee24a66 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -1390,6 +1390,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->disable = gen11_dsi_disable; encoder->port = port; encoder->get_config = gen11_dsi_get_config; + encoder->update_pipe = intel_panel_update_backlight; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->type = INTEL_OUTPUT_DSI; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b0bb8dfc2ed5..acd94354afc8 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3556,6 +3556,8 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, intel_psr_enable(intel_dp, crtc_state); intel_edp_drrs_enable(intel_dp, crtc_state); + + intel_panel_update_backlight(encoder, crtc_state, conn_state); } static void intel_ddi_update_pipe(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f7d5314e3395..2e994b5ba40b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -7001,6 +7001,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; intel_encoder->suspend = intel_dp_encoder_suspend; if (IS_CHERRYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 33b733d37706..47195320413a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -2023,6 +2023,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); +void intel_panel_update_backlight(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); extern struct drm_display_mode *intel_find_panel_downclock( struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 815ed463d9c5..b4aa49768e90 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -887,6 +887,7 @@ void intel_lvds_init(struct 
drm_i915_private *dev_priv) } intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector_attach_encoder(intel_connector, intel_encoder); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 5a39a6347a7a..bb8612ab8ee1 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1087,20 +1087,11 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, intel_panel_actually_set_backlight(conn_state, panel->backlight.level); } -void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - - if (!panel->backlight.present) - return; - - DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); - - mutex_lock(&dev_priv->backlight_lock); WARN_ON(panel->backlight.max == 0); @@ -1117,6 +1108,24 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, panel->backlight.enabled = true; if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_UNBLANK; +} + +void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + + if (!panel->backlight.present) + return; + + DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); + + mutex_lock(&dev_priv->backlight_lock); + + __intel_panel_enable_backlight(crtc_state, conn_state); mutex_unlock(&dev_priv->backlight_lock); } @@ -1776,6 +1785,24 @@ static int pwm_setup_backlight(struct intel_connector *connector, return 0; } +void intel_panel_update_backlight(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + if (!panel->backlight.present) + return; + + mutex_lock(&dev_priv->backlight_lock); + if (!panel->backlight.enabled) + __intel_panel_enable_backlight(crtc_state, conn_state); + + mutex_unlock(&dev_priv->backlight_lock); +} + int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) { struct drm_i915_private *dev_priv = to_i915(connector->dev); diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 4d47910e5184..696b750acd1d 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -1697,6 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_encoder->post_disable = intel_dsi_post_disable; intel_encoder->get_hw_state = intel_dsi_get_hw_state; intel_encoder->get_config = intel_dsi_get_config; + intel_encoder->update_pipe = 
From 5b1ec9ac7ab5b4520d4db98b7024a8dd5051b000 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 8 Jan 2019 17:08:39 +0100
Subject: drm/i915/backlight: Fix backlight takeover on LPT, v3.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On Lynxpoint the BIOS sometimes sets up the backlight using the CPU display, but the driver expects to use the PCH PWM override register.

Read the value from the CPU register, then convert it to the other units by going from the old duty cycle, to frequency, to the new units. This value is then programmed into the override register, after which we set the override and disable the CPU display control. This allows us to switch the source without flickering and makes the backlight controls work in the driver.

Changes since v1:
- Read BLC_PWM_CPU_CTL2 into cpu_ctl2.
- Clean up the cpu_mode if-block slightly.
- Always disable BLM_PWM_ENABLE in cpu_ctl2.
Changes since v2:
- Simplify cpu_mode handling. (Jani)

Signed-off-by: Maarten Lankhorst
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108225
Cc: Basil Eric Rabi
Cc: Hans de Goede
Cc: Tolga Cakir
Cc: Ville Syrjälä
Tested-by: Tolga Cakir
Cc: Jani Nikula
Reviewed-by: Hans de Goede
Reviewed-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20190108160842.13396-2-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/i915/intel_panel.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bb8612ab8ee1..beca98d2b035 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1496,8 +1496,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
-	u32 pch_ctl1, pch_ctl2, val;
-	bool alt;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+	bool alt, cpu_mode;
 
 	if (HAS_PCH_LPT(dev_priv))
 		alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
@@ -1511,6 +1511,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
 	panel->backlight.max = pch_ctl2 >> 16;
 
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+
 	if (!panel->backlight.max)
 		panel->backlight.max = get_backlight_max_vbt(connector);
 
@@ -1519,12 +1521,28 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
-	val = lpt_get_backlight(connector);
+	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+	cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+		   !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+		   (cpu_ctl2 & BLM_PWM_ENABLE);
+	if (cpu_mode)
+		val = pch_get_backlight(connector);
+	else
+		val = lpt_get_backlight(connector);
 	val = intel_panel_compute_brightness(connector, val);
 	panel->backlight.level = clamp(val, panel->backlight.min,
 				       panel->backlight.max);
 
-	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+	if (cpu_mode) {
+		DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+
+		/* Write converted CPU PWM value to PCH override register */
+		lpt_set_backlight(connector->base.state, panel->backlight.level);
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+	}
 
 	return 0;
 }
-- cgit v1.2.3
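Editor's note (not part of the patch series): the takeover above reads the duty cycle the BIOS left in the CPU PWM path and re-expresses it in the PCH override register's units before setting the override bit. Conceptually that is a proportional rescale between two PWM counter ranges; the hypothetical helper below illustrates only that idea (the driver's own conversion goes through its stored min/max and brightness-compute helpers):

#include <stdint.h>

/* Rescale a PWM duty cycle from one counter range to another while
 * keeping the same on/off ratio, i.e. the same perceived brightness. */
static uint32_t pwm_rescale_duty(uint32_t duty, uint32_t src_max, uint32_t dst_max)
{
	if (src_max == 0)
		return 0;

	/* 64-bit intermediate so large register values cannot overflow */
	return (uint32_t)(((uint64_t)duty * dst_max) / src_max);
}

With src_max taken from the source PWM period and dst_max from the target one, the returned value preserves the panel's apparent brightness across the switch; this is an illustration of the unit conversion, not the driver's exact arithmetic.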
From d19f958db23c14c857e3eaa0cefa6a9c55e1468d Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 8 Jan 2019 17:08:40 +0100
Subject: drm/i915: Enable fastset for non-boot modesets.

Now that our state comparison functions are pretty complete, we should enable fastset by default when a modeset can be avoided. Even if we're not completely certain about the inherited state, we can be certain after the first modeset that our sw state matches the hw state.

There is one testcase that explicitly tests fastset, kms_panel_fitting.atomic-fastset, but other testcases exercise it indirectly: most tests don't clean up the display on exit, or otherwise preserve the mode by calling igt_display_reset or by inheriting it during init.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Hans de Goede
Cc: Daniel Vetter
Reviewed-by: Hans de Goede
[mlankhorst: Use DRM_DEBUG_KMS. (j4ni)]
Link: https://patchwork.freedesktop.org/patch/msgid/20190108160842.13396-3-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d328599240cb..a02ce5a47f44 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11700,6 +11700,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
 		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
 
+	if (fixup_inherited && !i915_modparams.fastboot) {
+		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+		ret = false;
+	}
+
 #define PIPE_CONF_CHECK_X(name) do { \
 	if (current_config->name != pipe_config->name) { \
 		pipe_config_err(adjust, __stringify(name), \
@@ -12723,8 +12728,7 @@ static int intel_atomic_check(struct drm_device *dev,
 			return ret;
 		}
 
-		if (i915_modparams.fastboot &&
-		    intel_pipe_config_compare(dev_priv,
+		if (intel_pipe_config_compare(dev_priv,
 					to_intel_crtc_state(old_crtc_state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
-- cgit v1.2.3
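Editor's note (not part of the patch series): the net effect of the two hunks above is that the atomic check now always asks intel_pipe_config_compare() whether the new state matches the current one, and only an inherited (boot-time) state additionally requires the fastboot parameter. Reduced to a sketch with made-up names:

#include <stdbool.h>

/* Decide whether a commit can skip the full modeset ("fastset"):
 * the computed state must match the current one, and an inherited
 * boot-time state is only trusted when fastboot is enabled. */
static bool can_fastset(bool states_match, bool inherited, bool fastboot)
{
	if (inherited && !fastboot)
		return false;

	return states_match;
}

This mirrors the logic of the patch; the real decision is made per CRTC on the full pipe config comparison, not on a single boolean.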
From 0cdc1d07b4616f164978e8f1b3a5fe8b0a3ac835 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 8 Jan 2019 17:08:41 +0100
Subject: drm/i915: Make HW readout mark CRTC scaler as in use.

This way we don't accidentally double allocate it. Noticed this when I wrote a patch to sanity check all of the scaler state.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Hans de Goede
Link: https://patchwork.freedesktop.org/patch/msgid/20190108160842.13396-4-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a02ce5a47f44..36c1126cbc85 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8878,6 +8878,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
 			pipe_config->pch_pfit.enabled = true;
 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+			scaler_state->scalers[i].in_use = true;
 			break;
 		}
 	}
-- cgit v1.2.3

From 85baa5dbf79163026dcb78f742294c522e176432 Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi
Date: Thu, 24 Jan 2019 15:00:59 -0800
Subject: drm/i915: Update DRIVER_DATE to 20190124

Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/i915_drv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 03db011caa8e..3c111ad09922 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -91,8 +91,8 @@
 
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190110"
-#define DRIVER_TIMESTAMP 1547162337
+#define DRIVER_DATE "20190124"
+#define DRIVER_TIMESTAMP 1548370857
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
-- cgit v1.2.3