
Torchie The Cat more source

Raziel K. Crowe 2022-09-07 22:32:58 +05:00
parent e9f98f07aa
commit a240e076b6
99 changed files with 31656 additions and 0 deletions


@@ -0,0 +1,197 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* dw-hdmi-gp-audio.c
*
* Copyright 2020-2022 NXP
*/
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
#include <drm/drm_connector.h>
#include <sound/hdmi-codec.h>
#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>
#include <sound/dmaengine_pcm.h>
#include "dw-hdmi-audio.h"
#define DRIVER_NAME "dw-hdmi-gp-audio"
#define DRV_NAME "hdmi-gp-audio"
struct snd_dw_hdmi {
struct dw_hdmi_audio_data data;
struct platform_device *audio_pdev;
unsigned int pos;
};
struct dw_hdmi_channel_conf {
u8 conf1;
u8 ca;
};
/*
* The default mapping of ALSA channels to HDMI channels and speaker
* allocation bits. Note that we can't do channel remapping here -
* channels must be in the same order.
*
 * Mappings for alsa-lib pcm/surround*.conf files:
 *
 *              Front   Sur4.0  Sur4.1  Sur5.0  Sur5.1  Sur7.1
 * Channels     2       4       6       6       6       8
 *
 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
 *
 *                        Number of ALSA channels
 * ALSA Channel 2       3       4       5       6       7       8
 * 0            FL:0    =       =       =       =       =       =
 * 1            FR:1    =       =       =       =       =       =
 * 2                    FC:3    RL:4    LFE:2   =       =       =
 * 3                            RR:5    RL:4    FC:3    =       =
 * 4                                    RR:5    RL:4    =       =
 * 5                                            RR:5    =       =
 * 6                                                    RC:6    =
 * 7                                                    RLC/FRC RLC/FRC
*/
static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
{ 0x03, 0x00 }, /* FL,FR */
{ 0x0b, 0x02 }, /* FL,FR,FC */
{ 0x33, 0x08 }, /* FL,FR,RL,RR */
{ 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */
{ 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */
{ 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */
{ 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
};
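/*
 * Worked example: audio_hw_params() below indexes this table with
 * (params->channels - 2), so a 6-channel (5.1) stream selects the
 * { 0x3f, 0x0b } entry, i.e. speaker allocation FL,FR,LFE,FC,RL,RR.
 */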
static int audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
u8 ca;
dw_hdmi_set_sample_rate(dw->data.hdmi, params->sample_rate);
ca = default_hdmi_channel_config[params->channels - 2].ca;
dw_hdmi_set_channel_count(dw->data.hdmi, params->channels);
dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
dw_hdmi_set_sample_non_pcm(dw->data.hdmi,
params->iec.status[0] & IEC958_AES0_NONAUDIO);
dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);
return 0;
}
static void audio_shutdown(struct device *dev, void *data)
{
}
static int audio_mute_stream(struct device *dev, void *data,
bool enable, int direction)
{
struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
if (!enable)
dw_hdmi_audio_enable(dw->data.hdmi);
else
dw_hdmi_audio_disable(dw->data.hdmi);
return 0;
}
static int audio_get_eld(struct device *dev, void *data,
u8 *buf, size_t len)
{
struct dw_hdmi_audio_data *audio = data;
u8 *eld;
eld = audio->get_eld(audio->hdmi);
if (eld)
memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
else
		/* Pass an empty ELD if connector not available */
memset(buf, 0, len);
return 0;
}
static int audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
return dw_hdmi_set_plugged_cb(dw->data.hdmi, fn, codec_dev);
}
static const struct hdmi_codec_ops audio_codec_ops = {
.hw_params = audio_hw_params,
.audio_shutdown = audio_shutdown,
.mute_stream = audio_mute_stream,
.get_eld = audio_get_eld,
.hook_plugged_cb = audio_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
struct snd_dw_hdmi *dw;
const struct hdmi_codec_pdata codec_data = {
.i2s = 1,
.spdif = 0,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
.data = data,
};
dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
dw->data = *data;
platform_set_drvdata(pdev, dw);
dw->audio_pdev = platform_device_register_data(&pdev->dev,
HDMI_CODEC_DRV_NAME, 1,
&codec_data,
sizeof(codec_data));
return PTR_ERR_OR_ZERO(dw->audio_pdev);
}
static int snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
platform_device_unregister(dw->audio_pdev);
return 0;
}
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
.remove = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
};
module_platform_driver(snd_dw_hdmi_driver);
MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
MODULE_DESCRIPTION("Synopsys Designware HDMI GPA ALSA interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
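snd_dw_hdmi_probe() above expects a struct dw_hdmi_audio_data handed in through platform_data. As a minimal sketch (the helper name is hypothetical; dw_hdmi_audio_data comes from dw-hdmi-audio.h as in the file above), a parent dw-hdmi bridge driver could register this child like so:

#include <linux/platform_device.h>

#include "dw-hdmi-audio.h"

/* Sketch: parent driver registers the GP audio child device. */
static struct platform_device *
register_gp_audio_child(struct device *parent, struct dw_hdmi_audio_data *audio)
{
	return platform_device_register_data(parent, "dw-hdmi-gp-audio",
					     PLATFORM_DEVID_AUTO,
					     audio, sizeof(*audio));
}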


@@ -0,0 +1,51 @@
# SPDX-License-Identifier: MIT
config DRM_DP_AUX_BUS
tristate
depends on DRM
depends on OF || COMPILE_TEST
config DRM_DISPLAY_HELPER
tristate
depends on DRM
help
DRM helpers for display adapters.
config DRM_DISPLAY_DP_HELPER
bool
depends on DRM_DISPLAY_HELPER
help
DRM display helpers for DisplayPort.
config DRM_DISPLAY_HDCP_HELPER
bool
depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDCP.
config DRM_DISPLAY_HDMI_HELPER
bool
depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDMI.
config DRM_DP_AUX_CHARDEV
bool "DRM DP AUX Interface"
depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
help
Choose this option to enable a /dev/drm_dp_auxN node that allows you
to read and write values to arbitrary DPCD registers on the DP AUX
channel.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
Note: not all adapters support this feature, and even for those
that do support this they often do not hook up the CEC pin.
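Most of these helper symbols are invisible and meant to be selected by the drivers that need them rather than enabled by hand. A hypothetical consumer entry (DRM_FOO is a made-up symbol) might look like:

config DRM_FOO
	tristate "Foo DisplayPort driver"
	depends on DRM
	select DRM_DISPLAY_HELPER
	select DRM_DISPLAY_DP_HELPER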


@@ -0,0 +1,16 @@
# SPDX-License-Identifier: MIT
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += drm_hdmi_helper.o \
drm_scdc_helper.o
drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o


@@ -0,0 +1,22 @@
// SPDX-License-Identifier: MIT
#include <linux/module.h>
#include "drm_dp_helper_internal.h"
MODULE_DESCRIPTION("DRM display adapter helper");
MODULE_LICENSE("GPL and additional rights");
static int __init drm_display_helper_module_init(void)
{
return drm_dp_aux_dev_init();
}
static void __exit drm_display_helper_module_exit(void)
{
/* Call exit functions from specific dp helpers here */
drm_dp_aux_dev_exit();
}
module_init(drm_display_helper_module_init);
module_exit(drm_display_helper_module_exit);


@@ -0,0 +1,386 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2021 Google Inc.
*
* The DP AUX bus is used for devices that are connected over a DisplayPort
* AUX bus. The device on the far side of the bus is referred to as an
* endpoint in this code.
*
* There is only one device connected to the DP AUX bus: an eDP panel.
* Though historically panels (even DP panels) have been modeled as simple
* platform devices, putting them under the DP AUX bus allows the panel driver
* to perform transactions on that bus.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
struct dp_aux_ep_device_with_data {
struct dp_aux_ep_device aux_ep;
int (*done_probing)(struct drm_dp_aux *aux);
};
/**
* dp_aux_ep_match() - The match function for the dp_aux_bus.
* @dev: The device to match.
* @drv: The driver to try to match against.
*
* At the moment, we just match on device tree.
*
* Return: True if this driver matches this device; false otherwise.
*/
static int dp_aux_ep_match(struct device *dev, struct device_driver *drv)
{
return !!of_match_device(drv->of_match_table, dev);
}
/**
* dp_aux_ep_probe() - The probe function for the dp_aux_bus.
* @dev: The device to probe.
*
* Calls through to the endpoint driver probe.
*
* Return: 0 if no error or negative error code.
*/
static int dp_aux_ep_probe(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
struct dp_aux_ep_device_with_data *aux_ep_with_data =
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
ret = dev_pm_domain_attach(dev, true);
if (ret)
return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");
ret = aux_ep_drv->probe(aux_ep);
if (ret)
goto err_attached;
if (aux_ep_with_data->done_probing) {
ret = aux_ep_with_data->done_probing(aux_ep->aux);
if (ret) {
/*
* The done_probing() callback should not return
* -EPROBE_DEFER to us. If it does, we treat it as an
* error. Passing it on as-is would cause the _panel_
* to defer.
*/
if (ret == -EPROBE_DEFER) {
dev_err(dev,
"DP AUX done_probing() can't defer\n");
ret = -EINVAL;
}
goto err_probed;
}
}
return 0;
err_probed:
if (aux_ep_drv->remove)
aux_ep_drv->remove(aux_ep);
err_attached:
dev_pm_domain_detach(dev, true);
return ret;
}
/**
* dp_aux_ep_remove() - The remove function for the dp_aux_bus.
* @dev: The device to remove.
*
* Calls through to the endpoint driver remove.
*/
static void dp_aux_ep_remove(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
if (aux_ep_drv->remove)
aux_ep_drv->remove(aux_ep);
dev_pm_domain_detach(dev, true);
}
/**
* dp_aux_ep_shutdown() - The shutdown function for the dp_aux_bus.
* @dev: The device to shutdown.
*
* Calls through to the endpoint driver shutdown.
*/
static void dp_aux_ep_shutdown(struct device *dev)
{
struct dp_aux_ep_driver *aux_ep_drv;
if (!dev->driver)
return;
aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
if (aux_ep_drv->shutdown)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
static struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
.remove = dp_aux_ep_remove,
.shutdown = dp_aux_ep_shutdown,
};
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return of_device_modalias(dev, buf, PAGE_SIZE);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *dp_aux_ep_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(dp_aux_ep_dev);
/**
* dp_aux_ep_dev_release() - Free memory for the dp_aux_ep device
* @dev: The device to free.
*/
static void dp_aux_ep_dev_release(struct device *dev)
{
struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
struct dp_aux_ep_device_with_data *aux_ep_with_data =
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
kfree(aux_ep_with_data);
}
static struct device_type dp_aux_device_type_type = {
.groups = dp_aux_ep_dev_groups,
.uevent = of_device_uevent_modalias,
.release = dp_aux_ep_dev_release,
};
/**
 * of_dp_aux_ep_destroy() - Destroy a DP AUX endpoint device
* @dev: The device to destroy.
* @data: Not used
*
* This is just used as a callback by of_dp_aux_depopulate_bus() and
* is called for _all_ of the child devices of the device providing the AUX bus.
* We'll only act on those that are of type "dp_aux_bus_type".
*
* This function is effectively an inverse of what's in
 * of_dp_aux_populate_bus(). NOTE: since we only populate one child,
 * it's expected that only one device will match all the "if" tests in
* this function and get to the device_unregister().
*
* Return: 0 if no error or negative error code.
*/
static int of_dp_aux_ep_destroy(struct device *dev, void *data)
{
struct device_node *np = dev->of_node;
if (dev->bus != &dp_aux_bus_type)
return 0;
if (!of_node_check_flag(np, OF_POPULATED))
return 0;
of_node_clear_flag(np, OF_POPULATED);
of_node_put(np);
device_unregister(dev);
return 0;
}
/**
* of_dp_aux_depopulate_bus() - Undo of_dp_aux_populate_bus
* @aux: The AUX channel whose device we want to depopulate
*
* This will destroy the device that was created
* by of_dp_aux_populate_bus().
*/
void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux)
{
device_for_each_child_reverse(aux->dev, NULL, of_dp_aux_ep_destroy);
}
EXPORT_SYMBOL_GPL(of_dp_aux_depopulate_bus);
/**
 * of_dp_aux_populate_bus() - Populate the endpoint device on the DP AUX bus
* @aux: The AUX channel whose device we want to populate. It is required that
* drm_dp_aux_init() has already been called for this AUX channel.
 * @done_probing: Callback function to call after the EP device finishes probing.
* Will not be called if there are no EP devices and this
* function will return -ENODEV.
*
* This will populate the device (expected to be an eDP panel) under the
* "aux-bus" node of the device providing the AUX channel (AKA aux->dev).
*
* When this function finishes, it is _possible_ (but not guaranteed) that
* our sub-device will have finished probing. It should be noted that if our
 * sub-device returns -EPROBE_DEFER or is probing asynchronously for some
 * reason, we will not return any error codes ourselves, but our
 * sub-device will _not_ have actually probed successfully yet.
*
* In many cases it's important for the caller of this function to be notified
* when our sub device finishes probing. Our sub device is expected to be an
* eDP panel and the caller is expected to be an eDP controller. The eDP
* controller needs to be able to get a reference to the panel when it finishes
* probing. For this reason the caller can pass in a function pointer that
* will be called when our sub-device finishes probing.
*
* If this function succeeds you should later make sure you call
* of_dp_aux_depopulate_bus() to undo it, or just use the devm version
* of this function.
*
* Return: 0 if no error or negative error code; returns -ENODEV if there are
* no children. The done_probing() function won't be called in that
* case.
*/
int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
int (*done_probing)(struct drm_dp_aux *aux))
{
struct device_node *bus = NULL, *np = NULL;
struct dp_aux_ep_device *aux_ep;
struct dp_aux_ep_device_with_data *aux_ep_with_data;
int ret;
/* drm_dp_aux_init() should have been called already; warn if not */
WARN_ON_ONCE(!aux->ddc.algo);
if (!aux->dev->of_node)
return -ENODEV;
bus = of_get_child_by_name(aux->dev->of_node, "aux-bus");
if (!bus)
return -ENODEV;
np = of_get_next_available_child(bus, NULL);
of_node_put(bus);
if (!np)
return -ENODEV;
if (of_node_test_and_set_flag(np, OF_POPULATED)) {
dev_err(aux->dev, "DP AUX EP device already populated\n");
ret = -EINVAL;
goto err_did_get_np;
}
aux_ep_with_data = kzalloc(sizeof(*aux_ep_with_data), GFP_KERNEL);
if (!aux_ep_with_data) {
ret = -ENOMEM;
goto err_did_set_populated;
}
aux_ep_with_data->done_probing = done_probing;
aux_ep = &aux_ep_with_data->aux_ep;
aux_ep->aux = aux;
aux_ep->dev.parent = aux->dev;
aux_ep->dev.bus = &dp_aux_bus_type;
aux_ep->dev.type = &dp_aux_device_type_type;
aux_ep->dev.of_node = of_node_get(np);
dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
ret = device_register(&aux_ep->dev);
if (ret) {
dev_err(aux->dev, "Failed to create AUX EP for %pOF: %d\n", np, ret);
/*
* As per docs of device_register(), call this instead
* of kfree() directly for error cases.
*/
put_device(&aux_ep->dev);
goto err_did_set_populated;
}
return 0;
err_did_set_populated:
of_node_clear_flag(np, OF_POPULATED);
err_did_get_np:
of_node_put(np);
return ret;
}
EXPORT_SYMBOL_GPL(of_dp_aux_populate_bus);
static void of_dp_aux_depopulate_bus_void(void *data)
{
of_dp_aux_depopulate_bus(data);
}
/**
* devm_of_dp_aux_populate_bus() - devm wrapper for of_dp_aux_populate_bus()
* @aux: The AUX channel whose device we want to populate
 * @done_probing: Callback function to call after the EP device finishes probing.
* Will not be called if there are no EP devices and this
* function will return -ENODEV.
*
* Handles freeing w/ devm on the device "aux->dev".
*
* Return: 0 if no error or negative error code; returns -ENODEV if there are
* no children. The done_probing() function won't be called in that
* case.
*/
int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
int (*done_probing)(struct drm_dp_aux *aux))
{
int ret;
ret = of_dp_aux_populate_bus(aux, done_probing);
if (ret)
return ret;
return devm_add_action_or_reset(aux->dev,
of_dp_aux_depopulate_bus_void, aux);
}
EXPORT_SYMBOL_GPL(devm_of_dp_aux_populate_bus);
int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *drv, struct module *owner)
{
drv->driver.owner = owner;
drv->driver.bus = &dp_aux_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__dp_aux_dp_driver_register);
void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(dp_aux_dp_driver_unregister);
static int __init dp_aux_bus_init(void)
{
	return bus_register(&dp_aux_bus_type);
}
static void __exit dp_aux_bus_exit(void)
{
bus_unregister(&dp_aux_bus_type);
}
subsys_initcall(dp_aux_bus_init);
module_exit(dp_aux_bus_exit);
MODULE_AUTHOR("Douglas Anderson <dianders@chromium.org>");
MODULE_DESCRIPTION("DRM DisplayPort AUX bus");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,354 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
*
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include "drm_dp_helper_internal.h"
struct drm_dp_aux_dev {
unsigned index;
struct drm_dp_aux *aux;
struct device *dev;
struct kref refcount;
atomic_t usecount;
};
#define DRM_AUX_MINORS 256
#define AUX_MAX_OFFSET (1 << 20)
static DEFINE_IDR(aux_idr);
static DEFINE_MUTEX(aux_idr_mutex);
static struct class *drm_dp_aux_dev_class;
static int drm_dev_major = -1;
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
{
struct drm_dp_aux_dev *aux_dev = NULL;
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int index;
aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
if (!aux_dev)
return ERR_PTR(-ENOMEM);
aux_dev->aux = aux;
atomic_set(&aux_dev->usecount, 1);
kref_init(&aux_dev->refcount);
mutex_lock(&aux_idr_mutex);
index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
mutex_unlock(&aux_idr_mutex);
if (index < 0) {
kfree(aux_dev);
return ERR_PTR(index);
}
aux_dev->index = index;
return aux_dev;
}
static void release_drm_dp_aux_dev(struct kref *ref)
{
struct drm_dp_aux_dev *aux_dev =
container_of(ref, struct drm_dp_aux_dev, refcount);
kfree(aux_dev);
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t res;
struct drm_dp_aux_dev *aux_dev =
drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
if (!aux_dev)
return -ENODEV;
res = sprintf(buf, "%s\n", aux_dev->aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return res;
}
static DEVICE_ATTR_RO(name);
static struct attribute *drm_dp_aux_attrs[] = {
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(drm_dp_aux);
static int auxdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct drm_dp_aux_dev *aux_dev;
aux_dev = drm_dp_aux_dev_get_by_minor(minor);
if (!aux_dev)
return -ENODEV;
file->private_data = aux_dev;
return 0;
}
static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
{
return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
}
static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
while (iov_iter_count(to)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(to), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
if (copy_to_iter(buf, res, to) != res) {
res = -EFAULT;
break;
}
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
iov_iter_truncate(from, AUX_MAX_OFFSET - pos);
while (iov_iter_count(from)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(from), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
if (!copy_from_iter_full(buf, todo, from)) {
res = -EFAULT;
break;
}
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static int auxdev_release(struct inode *inode, struct file *file)
{
struct drm_dp_aux_dev *aux_dev = file->private_data;
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return 0;
}
static const struct file_operations auxdev_fops = {
.owner = THIS_MODULE,
.llseek = auxdev_llseek,
.read_iter = auxdev_read_iter,
.write_iter = auxdev_write_iter,
.open = auxdev_open,
.release = auxdev_release,
};
#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux)
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *iter, *aux_dev = NULL;
int id;
/* don't increase kref count here because this function should only be
* used by drm_dp_aux_unregister_devnode. Thus, it will always have at
* least one reference - the one that drm_dp_aux_register_devnode
* created
*/
mutex_lock(&aux_idr_mutex);
idr_for_each_entry(&aux_idr, iter, id) {
if (iter->aux == aux) {
aux_dev = iter;
break;
}
}
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
unsigned int minor;
aux_dev = drm_dp_aux_dev_get_by_aux(aux);
if (!aux_dev) /* attach must have failed */
return;
/*
* As some AUX adapters may exist as platform devices which outlive their respective DRM
* devices, we clear drm_dev to ensure that we never accidentally reference a stale pointer
*/
aux->drm_dev = NULL;
mutex_lock(&aux_idr_mutex);
idr_remove(&aux_idr, aux_dev->index);
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
minor = aux_dev->index;
if (aux_dev->dev)
device_destroy(drm_dp_aux_dev_class,
MKDEV(drm_dev_major, minor));
DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
}
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int res;
aux_dev = alloc_drm_dp_aux_dev(aux);
if (IS_ERR(aux_dev))
return PTR_ERR(aux_dev);
aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev,
MKDEV(drm_dev_major, aux_dev->index), NULL,
"drm_dp_aux%d", aux_dev->index);
if (IS_ERR(aux_dev->dev)) {
res = PTR_ERR(aux_dev->dev);
aux_dev->dev = NULL;
goto error;
}
DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n",
aux->name, aux_dev->index);
return 0;
error:
drm_dp_aux_unregister_devnode(aux);
return res;
}
int drm_dp_aux_dev_init(void)
{
int res;
drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
return PTR_ERR(drm_dp_aux_dev_class);
}
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
res = register_chrdev(0, "aux", &auxdev_fops);
if (res < 0)
goto out;
drm_dev_major = res;
return 0;
out:
class_destroy(drm_dp_aux_dev_class);
return res;
}
void drm_dp_aux_dev_exit(void)
{
unregister_chrdev(drm_dev_major, "aux");
class_destroy(drm_dp_aux_dev_class);
}
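Since auxdev_llseek() pins the node to a fixed 1 MiB range, DPCD addresses map one-to-one onto file offsets, so the device can be driven from userspace with plain pread(2)/pwrite(2). A small sketch, assuming a node named /dev/drm_dp_aux0 exists, that reads the DPCD revision byte at offset 0x000:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char rev;
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* DPCD offset 0x000 holds DPCD_REV: major version in the high nibble. */
	if (pread(fd, &rev, 1, 0x000) == 1)
		printf("DPCD rev: %x.%x\n", rev >> 4, rev & 0xf);
	close(fd);
	return 0;
}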


@@ -0,0 +1,451 @@
// SPDX-License-Identifier: GPL-2.0
/*
* DisplayPort CEC-Tunneling-over-AUX support
*
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <media/cec.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
* have a converter chip that supports CEC-Tunneling-over-AUX (usually the
* Parade PS176), but they do not wire up the CEC pin, thus making CEC
* useless. Note that MegaChips 2900-based adapters appear to have good
* support for CEC tunneling. Those adapters that I have tested using
* this chipset all have the CEC line connected.
*
* Sadly there is no way for this driver to know this. What happens is
* that a /dev/cecX device is created that is isolated and unable to see
* any of the other CEC devices. Quite literally the CEC wire is cut
* (or in this case, never connected in the first place).
*
* The reason so few adapters support this is that this tunneling protocol
* was never supported by any OS. So there was no easy way of testing it,
* and no incentive to correctly wire up the CEC pin.
*
* Hopefully by creating this driver it will be easier for vendors to
* finally fix their adapters and test the CEC functionality.
*
* I keep a list of known working adapters here:
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
* Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
* As far as I can see there is no mechanism defined in the DisplayPort
* standard to transport CEC interrupts over an MST device. It might be
* possible to do this through polling, but I have not been able to get that
* to work.
*/
/**
* DOC: dp cec helpers
*
* These functions take care of supporting the CEC-Tunneling-over-AUX
* feature of DisplayPort-to-HDMI adapters.
*/
/*
* When the EDID is unset because the HPD went low, then the CEC DPCD registers
* typically can no longer be read (true for a DP-to-HDMI adapter since it is
* powered by the HPD). However, some displays toggle the HPD off and on for a
* short period for one reason or another, and that would cause the CEC adapter
* to be removed and added again, even though nothing else changed.
*
* This module parameter sets a delay in seconds before the CEC adapter is
* actually unregistered. Only if the HPD does not return within that time will
* the CEC adapter be unregistered.
*
* If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
* be unregistered for as long as the connector remains registered.
*
* If it is set to 0, then the CEC adapter will be unregistered immediately as
* soon as the HPD disappears.
*
* The default is one second to prevent short HPD glitches from unregistering
* the CEC adapter.
*
* Note that for integrated HDMI branch devices that support CEC the DPCD
* registers remain available even if the HPD goes low since it is not powered
* by the HPD. In that case the CEC adapter will never be unregistered during
 * the lifetime of the connector. At least, this is the theory since I do not
* have hardware with an integrated HDMI branch device that supports CEC.
*/
#define NEVER_UNREG_DELAY 1000
static unsigned int drm_dp_cec_unregister_delay = 1;
module_param(drm_dp_cec_unregister_delay, uint, 0600);
MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
"CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
/* Bit 15 (logical address 15) should always be set */
u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
u8 mask[2];
ssize_t err;
if (addr != CEC_LOG_ADDR_INVALID)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
unsigned int retries = min(5, attempts - 1);
ssize_t err;
err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
msg->msg, msg->len);
if (err < 0)
return err;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
(msg->len - 1) | (retries << 4) |
DP_CEC_TX_MESSAGE_SEND);
return err < 0 ? err : 0;
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
bool enable)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
ssize_t err;
u8 val;
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
if (err >= 0) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
static void drm_dp_cec_adap_status(struct cec_adapter *adap,
struct seq_file *file)
{
struct drm_dp_aux *aux = cec_get_drvdata(adap);
struct drm_dp_desc desc;
struct drm_dp_dpcd_ident *id = &desc.ident;
if (drm_dp_read_desc(aux, &desc, true))
return;
seq_printf(file, "OUI: %*phD\n",
(int)sizeof(id->oui), id->oui);
seq_printf(file, "ID: %*pE\n",
(int)strnlen(id->device_id, sizeof(id->device_id)),
id->device_id);
seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
/*
* Show this both in decimal and hex: at least one vendor
* always reports this in hex.
*/
seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
id->sw_major_rev, id->sw_minor_rev,
id->sw_major_rev, id->sw_minor_rev);
}
static const struct cec_adap_ops drm_dp_cec_adap_ops = {
.adap_enable = drm_dp_cec_adap_enable,
.adap_log_addr = drm_dp_cec_adap_log_addr,
.adap_transmit = drm_dp_cec_adap_transmit,
.adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
.adap_status = drm_dp_cec_adap_status,
};
static int drm_dp_cec_received(struct drm_dp_aux *aux)
{
struct cec_adapter *adap = aux->cec.adap;
struct cec_msg msg;
u8 rx_msg_info;
ssize_t err;
err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
cec_received_msg(adap, &msg);
return 0;
}
static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
{
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
drm_dp_cec_received(aux);
if (flags & DP_CEC_TX_MESSAGE_SENT)
cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
else if (flags & DP_CEC_TX_LINE_ERROR)
cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
CEC_TX_STATUS_MAX_RETRIES);
else if (flags &
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
* drm_dp_cec_irq() - handle CEC interrupt, if any
* @aux: DisplayPort AUX channel
*
* Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
* is present, then it will check for a CEC_IRQ and handle it accordingly.
*/
void drm_dp_cec_irq(struct drm_dp_aux *aux)
{
u8 cec_irq;
int ret;
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
&cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_irq);
static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
*cec_cap = cap;
return true;
}
/*
* Called if the HPD was low for more than drm_dp_cec_unregister_delay
* seconds. This unregisters the CEC adapter.
*/
static void drm_dp_cec_unregister_work(struct work_struct *work)
{
struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
cec.unregister_work.work);
mutex_lock(&aux->cec.lock);
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
mutex_unlock(&aux->cec.lock);
}
/*
* A new EDID is set. If there is no CEC adapter, then create one. If
* there was a CEC adapter, then check if the CEC adapter properties
* were unchanged and just update the CEC physical address. Otherwise
* unregister the old CEC adapter and create a new one.
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
struct drm_connector *connector = aux->cec.connector;
u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
CEC_CAP_CONNECTOR_INFO;
struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
#ifndef CONFIG_MEDIA_CEC_RC
/*
* CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
* cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
*
* Do this here as well to ensure the tests against cec_caps are
* correct.
*/
cec_caps &= ~CEC_CAP_RC;
#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
if (!drm_dp_cec_cap(aux, &cap)) {
/* CEC is not supported, unregister any existing adapter */
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
goto unlock;
}
if (cap & DP_CEC_SNOOPING_CAPABLE)
cec_caps |= CEC_CAP_MONITOR_ALL;
if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
if (aux->cec.adap->capabilities == cec_caps &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr_from_edid(aux->cec.adap, edid);
goto unlock;
}
/*
* The capabilities changed, so unregister the old
* adapter first.
*/
cec_unregister_adapter(aux->cec.adap);
}
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
cec_fill_conn_info_from_drm(&conn_info, connector);
cec_s_conn_info(aux->cec.adap, &conn_info);
if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
/*
* Update the phys addr for the new CEC adapter. When called
* from drm_dp_cec_register_connector() edid == NULL, so in
* that case the phys addr is just invalidated.
*/
cec_s_phys_addr_from_edid(aux->cec.adap, edid);
}
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_set_edid);
/*
* The EDID disappeared (likely because of the HPD going down).
*/
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
{
/* No transfer function was set, so not a DP connector */
if (!aux->transfer)
return;
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
cec_phys_addr_invalidate(aux->cec.adap);
/*
* We're done if we want to keep the CEC device
* (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
* DPCD still indicates the CEC capability (expected for an integrated
* HDMI branch device).
*/
if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
!drm_dp_cec_cap(aux, NULL)) {
/*
* Unregister the CEC adapter after drm_dp_cec_unregister_delay
* seconds. This to debounce short HPD off-and-on cycles from
* displays.
*/
schedule_delayed_work(&aux->cec.unregister_work,
drm_dp_cec_unregister_delay * HZ);
}
unlock:
mutex_unlock(&aux->cec.lock);
}
EXPORT_SYMBOL(drm_dp_cec_unset_edid);
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
* @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
* CEC adapter parent device. After registering the name and parent
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
EXPORT_SYMBOL(drm_dp_cec_register_connector);
/**
* drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
* @aux: DisplayPort AUX channel
*/
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
{
if (!aux->cec.adap)
return;
cancel_delayed_work_sync(&aux->cec.unregister_work);
cec_unregister_adapter(aux->cec.adap);
aux->cec.adap = NULL;
}
EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
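Tying these exports together: drm_dp_cec_register_connector() is called once when the connector is created, drm_dp_cec_irq() from the IRQ_HPD handler, and EDID changes funnel into the set/unset pair. A hedged sketch of that last part (the foo_ wrapper and the surrounding driver structure are assumptions):

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_connector.h>

/* Sketch: EDID events drive CEC adapter creation and teardown. */
static void foo_dp_edid_changed(struct drm_dp_aux *aux,
				const struct edid *edid)
{
	if (edid)
		drm_dp_cec_set_edid(aux, edid);	/* HPD up: (re)check for CEC */
	else
		drm_dp_cec_unset_edid(aux);	/* HPD down: debounced removal */
}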


@@ -0,0 +1,530 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
/**
* DOC: dp dual mode helpers
*
* Helper functions to deal with DP dual mode (aka. DP++) adaptors.
*
* Type 1:
* Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
*
* Type 2:
* Adaptor registers and sink DDC bus can be accessed either via I2C or
* I2C-over-AUX. Source devices may choose to implement either of these
* access methods.
*/
#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
/**
* drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
 * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
.buf = &offset,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
.len = size,
.buf = buffer,
},
};
int ret;
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_read);
/**
* drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
 * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
*
* Returns:
* 0 on success, negative error code on failure
*/
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size)
{
struct i2c_msg msg = {
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1 + size,
.buf = NULL,
};
void *data;
int ret;
data = kmalloc(msg.len, GFP_KERNEL);
if (!data)
return -ENOMEM;
msg.buf = data;
memcpy(data, &offset, 1);
memcpy(data + 1, buffer, size);
ret = i2c_transfer(adapter, &msg, 1);
kfree(data);
if (ret < 0)
return ret;
if (ret != 1)
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)
{
return adaptor_id == 0 || adaptor_id == 0xff;
}
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
const uint8_t adaptor_id)
{
return is_hdmi_adaptor(hdmi_id) &&
(adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_TYPE_HAS_DPCD));
}
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @dev: &drm_device to use
* @adapter: I2C adapter for the DDC bus
*
* Attempt to identify the type of the DP dual mode adaptor used.
*
* Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
* certain whether we're dealing with a native HDMI port or
* a type 1 DVI dual mode adaptor. The driver will have to use
* some other hardware/driver specific mechanism to make that
* distinction.
*
* Returns:
* The type of the DP dual mode adaptor used
*/
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
struct i2c_adapter *adapter)
{
char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
uint8_t adaptor_id = 0x00;
ssize_t ret;
/*
	 * Let's see if the adaptor is there by reading the
	 * HDMI ID registers.
	 *
	 * Note that type 1 DVI adaptors are not required to implement
	 * any registers, and that presents a problem for detection.
	 * If the i2c transfer is nacked, we may or may not be dealing
	 * with a type 1 DVI adaptor. Some other mechanism of detecting
	 * the presence of the adaptor is required. One way would be
	 * to check the state of the CONFIG1 pin; another method would
* simply require the driver to know whether the port is a DP++
* port or a native HDMI port. Both of these methods are entirely
* hardware/driver specific so we can't deal with them here.
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
drm_dbg_kms(dev, "DP dual mode HDMI ID: %*pE (err %zd)\n",
ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
/*
* Sigh. Some (maybe all?) type 1 adaptors are broken and ack
* the offset but ignore it, and instead they just always return
* data from the start of the HDMI ID buffer. So for a broken
* type 1 HDMI adaptor a single byte read will always give us
* 0x44, and for a type 1 DVI adaptor it should give 0x00
* (assuming it implements any registers). Fortunately neither
* of those values will match the type 2 signature of the
* DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
* the type 2 adaptor detection safely even in the presence
* of broken type 1 adaptors.
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
/*
		 * If this is neither a proper type 1 ID nor a broken type 1
		 * adaptor as described above, assume type 1, but let the user know
* that we may have misdetected the type.
*/
if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
}
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE1_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE1_DVI;
}
EXPORT_SYMBOL(drm_dp_dual_mode_detect);
/**
* drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
*
* Determine the max TMDS clock the adaptor supports based on the
* type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
* register (on type2 adaptors). As some type 1 adaptors have
* problems with registers (see comments in drm_dp_dual_mode_detect())
* we don't read the register on those, instead we simply assume
* a 165 MHz limit based on the specification.
*
* Returns:
* Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
*/
int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter)
{
uint8_t max_tmds_clock;
ssize_t ret;
/* native HDMI so no limit */
if (type == DRM_DP_DUAL_MODE_NONE)
return 0;
/*
	 * Type 1 adaptors are limited to 165 MHz;
	 * type 2 adaptors can tell us their limit.
*/
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 165000;
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
&max_tmds_clock, sizeof(max_tmds_clock));
if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
drm_dbg_kms(dev, "Failed to query max TMDS clock\n");
return 165000;
}
return max_tmds_clock * 5000 / 2;
}
EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
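/*
 * Worked example: a type 2 adaptor reporting DP_DUAL_MODE_MAX_TMDS_CLOCK = 66
 * yields 66 * 5000 / 2 = 165000 kHz, i.e. the same 165 MHz limit assumed for
 * type 1 adaptors above.
 */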
/**
* drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enabled: current state of the TMDS output buffers
*
* Get the state of the TMDS output buffers in the adaptor. For
* type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
* register. As some type 1 adaptors have problems with registers
* (see comments in drm_dp_dual_mode_detect()) we don't read the
* register on those, instead we simply assume that the buffers
* are always enabled.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev,
enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter,
bool *enabled)
{
uint8_t tmds_oen;
ssize_t ret;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
*enabled = true;
return 0;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
drm_dbg_kms(dev, "Failed to query state of TMDS output buffers\n");
return ret;
}
*enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
return 0;
}
EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
/**
* drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
* @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
* type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
* some type 1 adaptors have problems with registers (see comments
* in drm_dp_dual_mode_detect()) we avoid touching the register,
* making this function a no-op on type 1 adaptors.
*
* Returns:
* 0 on success, negative error code on failure
*/
int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable)
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
/*
* LSPCON adapters in low-power state may ignore the first write, so
* read back and verify the written value a few times.
*/
for (retry = 0; retry < 3; retry++) {
uint8_t tmp;
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
drm_dbg_kms(dev, "Failed to %s TMDS output buffers (%d attempts)\n",
enable ? "enable" : "disable", retry + 1);
return ret;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmp, sizeof(tmp));
if (ret) {
drm_dbg_kms(dev,
"I2C read failed during TMDS output buffer %s (%d attempts)\n",
enable ? "enabling" : "disabling", retry + 1);
return ret;
}
if (tmp == tmds_oen)
return 0;
}
drm_dbg_kms(dev, "I2C write value mismatch during TMDS output buffer %s\n",
enable ? "enabling" : "disabling");
return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
/**
* drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
* @type: DP dual mode adaptor type
*
* Returns:
* String representation of the DP dual mode adaptor type
*/
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
{
switch (type) {
case DRM_DP_DUAL_MODE_NONE:
return "none";
case DRM_DP_DUAL_MODE_TYPE1_DVI:
return "type 1 DVI";
case DRM_DP_DUAL_MODE_TYPE1_HDMI:
return "type 1 HDMI";
case DRM_DP_DUAL_MODE_TYPE2_DVI:
return "type 2 DVI";
case DRM_DP_DUAL_MODE_TYPE2_HDMI:
return "type 2 HDMI";
case DRM_DP_DUAL_MODE_LSPCON:
return "lspcon";
default:
WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
return "unknown";
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/**
* drm_lspcon_get_mode: Get LSPCON's current mode of operation by
* reading offset (0x80, 0x41)
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: current lspcon mode of operation output variable
*
* Returns:
 * 0 on success, with *mode set to the current mode of operation;
 * negative error code on failure
*/
int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *mode)
{
u8 data;
int ret = 0;
int retry;
if (!mode) {
drm_err(dev, "NULL input\n");
return -EINVAL;
}
/* Read Status: i2c over aux */
for (retry = 0; retry < 6; retry++) {
if (retry)
usleep_range(500, 1000);
ret = drm_dp_dual_mode_read(adapter,
DP_DUAL_MODE_LSPCON_CURRENT_MODE,
&data, sizeof(data));
if (!ret)
break;
}
if (ret < 0) {
drm_dbg_kms(dev, "LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
*mode = DRM_LSPCON_MODE_PCON;
else
*mode = DRM_LSPCON_MODE_LS;
return 0;
}
EXPORT_SYMBOL(drm_lspcon_get_mode);
/**
* drm_lspcon_set_mode: Change LSPCON's mode of operation by
* writing offset (0x80, 0x40)
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode mode)
{
u8 data = 0;
int ret;
int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
data = DP_DUAL_MODE_LSPCON_MODE_PCON;
/* Change mode */
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
&data, sizeof(data));
if (ret < 0) {
drm_err(dev, "LSPCON mode change failed\n");
return ret;
}
/*
* Confirm mode change by reading the status bit.
* Sometimes, it takes a while to change the mode,
* so wait and retry until time out or done.
*/
do {
ret = drm_lspcon_get_mode(dev, adapter, &current_mode);
if (ret) {
drm_err(dev, "can't confirm LSPCON mode change\n");
return ret;
} else {
if (current_mode != mode) {
msleep(10);
time_out -= 10;
} else {
drm_dbg_kms(dev, "LSPCON mode changed to %s\n",
mode == DRM_LSPCON_MODE_LS ? "LS" : "PCON");
return 0;
}
}
} while (time_out);
drm_err(dev, "LSPCON mode change timed out\n");
return -ETIMEDOUT;
}
EXPORT_SYMBOL(drm_lspcon_set_mode);
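As a closing usage sketch, a driver that owns the DDC i2c_adapter of a DP++ port might combine the helpers in this file as follows (the foo_ name is hypothetical):

static int foo_setup_dp_dual_mode(struct drm_device *dev,
				  struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(dev, ddc);
	int max_tmds_khz = drm_dp_dual_mode_max_tmds_clock(dev, type, ddc);

	drm_dbg_kms(dev, "DP++ adaptor: %s, max TMDS clock %d kHz\n",
		    drm_dp_get_dual_mode_type_name(type), max_tmds_khz);

	/* Enable the TMDS output buffers (a no-op on type 1 adaptors). */
	return drm_dp_dual_mode_set_tmds_output(dev, type, ddc, true);
}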

File diff suppressed because it is too large


@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: MIT */
#ifndef DRM_DP_HELPER_INTERNAL_H
#define DRM_DP_HELPER_INTERNAL_H
struct drm_dp_aux;
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
#else
static inline int drm_dp_aux_dev_init(void)
{
return 0;
}
static inline void drm_dp_aux_dev_exit(void)
{
}
static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
return 0;
}
static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
}
#endif
#endif

File diff suppressed because it is too large


@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Declarations for DP MST related functions which are only used in selftests
*
* Copyright © 2018 Red Hat
* Authors:
* Lyude Paul <lyude@redhat.com>
*/
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
#include <drm/display/drm_dp_mst_helper.h>
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
struct drm_dp_sideband_msg_tx *raw);
int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
struct drm_dp_sideband_msg_req_body *req);
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
int indent, struct drm_printer *printer);
#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */


@@ -0,0 +1,409 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corp
*
* Author:
* Manasi Navare <manasi.d.navare@intel.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_print.h>
/**
* DOC: dsc helpers
*
* VESA specification for DP 1.4 adds a new feature called Display Stream
 * Compression (DSC), used to compress the pixel bits before sending them on
 * DP/eDP/MIPI DSI interfaces. DSC must be enabled so that the existing
 * display interfaces can support high resolutions at higher frame rates using
 * the maximum available link capacity of these interfaces.
*
* These functions contain some common logic and helpers to deal with VESA
* Display Stream Compression standard required for DSC on Display Port/eDP or
* MIPI display interfaces.
*/
/**
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
* @pps_header: Secondary data packet header for DSC Picture
* Parameter Set as defined in &struct dp_sdp_header
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
* This function populates the SDP header defined in
* &struct dp_sdp_header.
*/
void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
{
memset(pps_header, 0, sizeof(*pps_header));
pps_header->HB1 = DP_SDP_PPS;
pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
}
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
* drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
* @rc_buffer_block_size: block size code, according to DPCD offset 62h
* @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
*
 * Returns:
* buffer size in bytes, or 0 on invalid input
*/
int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
{
int size = 1024 * (rc_buffer_size + 1);
switch (rc_buffer_block_size) {
case DP_DSC_RC_BUF_BLK_SIZE_1:
return 1 * size;
case DP_DSC_RC_BUF_BLK_SIZE_4:
return 4 * size;
case DP_DSC_RC_BUF_BLK_SIZE_16:
return 16 * size;
case DP_DSC_RC_BUF_BLK_SIZE_64:
return 64 * size;
default:
return 0;
}
}
EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
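/*
 * Worked example (illustrative): with rc_buffer_block_size =
 * DP_DSC_RC_BUF_BLK_SIZE_16 and rc_buffer_size = 1, size = 1024 * (1 + 1)
 * = 2048, so the helper returns 16 * 2048 = 32768 bytes.
 */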
/**
* drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
* @pps_payload:
* Bitwise struct for DSC Picture Parameter Set. This is defined
* by &struct drm_dsc_picture_parameter_set
* @dsc_cfg:
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
* DSC source device sends a picture parameter set (PPS) containing the
* information required by the sink to decode the compressed frame. Driver
* populates the DSC PPS struct using the DSC configuration parameters in
* the order expected by the DSC Display Sink device. For the DSC, the sink
* device expects the PPS payload in big endian format for fields
* that span more than 1 byte.
*/
void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
const struct drm_dsc_config *dsc_cfg)
{
int i;
/* Protect against someone accidentally changing struct size */
BUILD_BUG_ON(sizeof(*pps_payload) !=
DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
memset(pps_payload, 0, sizeof(*pps_payload));
/* PPS 0 */
pps_payload->dsc_version =
dsc_cfg->dsc_version_minor |
dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
/* PPS 1, 2 is 0 */
/* PPS 3 */
pps_payload->pps_3 =
dsc_cfg->line_buf_depth |
dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
/* PPS 4 */
pps_payload->pps_4 =
((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT) |
dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
/* PPS 5 */
pps_payload->bits_per_pixel_low =
(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
/*
* The DSC panel expects the PPS packet to have big endian format
 * for data spanning 2 bytes. Use the cpu_to_be16() macro to convert
 * to big endian format: on a little endian host it swaps the bytes,
 * otherwise it leaves them unchanged.
*/
/* PPS 6, 7 */
pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
/* PPS 8, 9 */
pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
/* PPS 10, 11 */
pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
/* PPS 12, 13 */
pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
/* PPS 14, 15 */
pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
/* PPS 16 */
pps_payload->initial_xmit_delay_high =
((dsc_cfg->initial_xmit_delay &
DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 17 */
pps_payload->initial_xmit_delay_low =
(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
/* PPS 18, 19 */
pps_payload->initial_dec_delay =
cpu_to_be16(dsc_cfg->initial_dec_delay);
/* PPS 20 is 0 */
/* PPS 21 */
pps_payload->initial_scale_value =
dsc_cfg->initial_scale_value;
/* PPS 22, 23 */
pps_payload->scale_increment_interval =
cpu_to_be16(dsc_cfg->scale_increment_interval);
/* PPS 24 */
pps_payload->scale_decrement_interval_high =
((dsc_cfg->scale_decrement_interval &
DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 25 */
pps_payload->scale_decrement_interval_low =
(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
/* PPS 26[7:0], PPS 27[7:5] RESERVED */
/* PPS 27 */
pps_payload->first_line_bpg_offset =
dsc_cfg->first_line_bpg_offset;
/* PPS 28, 29 */
pps_payload->nfl_bpg_offset =
cpu_to_be16(dsc_cfg->nfl_bpg_offset);
/* PPS 30, 31 */
pps_payload->slice_bpg_offset =
cpu_to_be16(dsc_cfg->slice_bpg_offset);
/* PPS 32, 33 */
pps_payload->initial_offset =
cpu_to_be16(dsc_cfg->initial_offset);
/* PPS 34, 35 */
pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
/* PPS 36 */
pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
/* PPS 37 */
pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
/* PPS 40 */
pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
/* PPS 41 */
pps_payload->rc_quant_incr_limit0 =
dsc_cfg->rc_quant_incr_limit0;
/* PPS 42 */
pps_payload->rc_quant_incr_limit1 =
dsc_cfg->rc_quant_incr_limit1;
/* PPS 43 */
pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
/* PPS 44 - 57 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
pps_payload->rc_buf_thresh[i] =
dsc_cfg->rc_buf_thresh[i];
/* PPS 58 - 87 */
/*
* For DSC sink programming the RC Range parameter fields
* are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
DSC_PPS_RC_RANGE_MINQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_max_qp <<
DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
pps_payload->native_422_420 = dsc_cfg->native_422 |
dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
/* PPS 89 */
pps_payload->second_line_bpg_offset =
dsc_cfg->second_line_bpg_offset;
/* PPS 90, 91 */
pps_payload->nsl_bpg_offset =
cpu_to_be16(dsc_cfg->nsl_bpg_offset);
/* PPS 92, 93 */
pps_payload->second_line_offset_adj =
cpu_to_be16(dsc_cfg->second_line_offset_adj);
	/* PPS 94 - 127 are 0 */
}
EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
/**
* drm_dsc_compute_rc_parameters() - Write rate control
* parameters to the dsc configuration defined in
* &struct drm_dsc_config in accordance with the DSC 1.2
* specification. Some configuration fields must be present
* beforehand.
*
* @vdsc_cfg:
 *	DSC Configuration data partially filled by driver
 *
 * Returns:
 * 0 on success, or a negative error code (-ERANGE) on failure.
 */
int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
{
unsigned long groups_per_line = 0;
unsigned long groups_total = 0;
unsigned long num_extra_mux_bits = 0;
unsigned long slice_bits = 0;
unsigned long hrd_delay = 0;
unsigned long final_scale = 0;
unsigned long rbs_min = 0;
if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
/* Number of groups used to code each line of a slice */
groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
DSC_RC_PIXELS_PER_GROUP);
/* chunksize in Bytes */
vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
vdsc_cfg->bits_per_pixel,
(8 * 16));
} else {
/* Number of groups used to code each line of a slice */
groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
DSC_RC_PIXELS_PER_GROUP);
/* chunksize in Bytes */
vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
vdsc_cfg->bits_per_pixel,
(8 * 16));
}
if (vdsc_cfg->convert_rgb)
num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4)
- 2);
else if (vdsc_cfg->native_422)
num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4) +
3 * (4 * vdsc_cfg->bits_per_component) - 2;
else
num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
(4 * vdsc_cfg->bits_per_component + 4) +
2 * (4 * vdsc_cfg->bits_per_component) - 2;
/* Number of bits in one Slice */
slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
while ((num_extra_mux_bits > 0) &&
((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
num_extra_mux_bits--;
if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
vdsc_cfg->initial_scale_value = groups_per_line + 8;
/* scale_decrement_interval calculation according to DSC spec 1.11 */
if (vdsc_cfg->initial_scale_value > 8)
vdsc_cfg->scale_decrement_interval = groups_per_line /
(vdsc_cfg->initial_scale_value - 8);
else
vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
(vdsc_cfg->initial_xmit_delay *
vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
return -ERANGE;
}
final_scale = (vdsc_cfg->rc_model_size * 8) /
(vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
if (vdsc_cfg->slice_height > 1)
/*
* NflBpgOffset is 16 bit value with 11 fractional bits
* hence we multiply by 2^11 for preserving the
* fractional part
*/
vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
(vdsc_cfg->slice_height - 1));
else
vdsc_cfg->nfl_bpg_offset = 0;
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
/* slice_bpg_offset is 16 bit value with 11 fractional bits */
vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
vdsc_cfg->initial_offset +
num_extra_mux_bits) << 11),
groups_total);
if (final_scale > 9) {
/*
* ScaleIncrementInterval =
* finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
* as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
* we need divide by 2^11 from pstDscCfg values
*/
vdsc_cfg->scale_increment_interval =
(vdsc_cfg->final_offset * (1 << 11)) /
((vdsc_cfg->nfl_bpg_offset +
vdsc_cfg->slice_bpg_offset) *
(final_scale - 9));
} else {
/*
* If finalScaleValue is less than or equal to 9, a value of 0 should
* be used to disable the scale increment at the end of the slice
*/
vdsc_cfg->scale_increment_interval = 0;
}
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
* in steps of 1/16 of a bit per pixel
*/
rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
vdsc_cfg->bits_per_pixel, 16) +
groups_per_line * vdsc_cfg->first_line_bpg_offset;
hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
return 0;
}
EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
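/*
 * Illustrative usage sketch (not part of the original file): a typical
 * encoder driver fills struct drm_dsc_config, derives the rate control
 * fields, then packs the PPS for transmission; the cfg initialisation is
 * abbreviated here.
 *
 *	struct drm_dsc_picture_parameter_set pps;
 *	struct drm_dsc_config cfg = {};
 *	int ret;
 *
 *	(fill cfg: pic/slice dimensions, bits_per_pixel, bits_per_component, ...)
 *	ret = drm_dsc_compute_rc_parameters(&cfg);
 *	if (ret)
 *		return ret;
 *	drm_dsc_pps_payload_pack(&pps, &cfg);
 */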

View File

@ -0,0 +1,421 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Intel Corporation.
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_print.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_connector.h>
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
ksv[0], ksv[1], ksv[2], ksv[3], ksv[4]);
}
static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
{
u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz;
while (parsed_bytes < vrls_length) {
vrl_ksv_cnt = *buf;
ksv_count += vrl_ksv_cnt;
vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1;
buf += vrl_sz;
parsed_bytes += vrl_sz;
}
/*
* When vrls are not valid, ksvs are not considered.
* Hence SRM will be discarded.
*/
if (parsed_bytes != vrls_length)
ksv_count = 0;
return ksv_count;
}
static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN;
buf++;
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
} while (parsed_bytes < vrls_length);
return ksv_count;
}
static inline u32 get_vrl_length(const u8 *buf)
{
return drm_hdcp_be24_to_cpu(buf);
}
static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_1_4_VRL_LENGTH_SIZE + DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id,
be16_to_cpu(header->srm_version), header->srm_gen_no);
WARN_ON(header->reserved);
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
	/* Length of all the VRLs combined */
vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE;
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return 0;
}
*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
*revoked_ksv_cnt = 0;
kfree(*revoked_ksv_list);
return -EINVAL;
}
*revoked_ksv_cnt = ksv_count;
return 0;
}
static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_2_VRL_LENGTH_SIZE + DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id & DRM_HDCP_SRM_ID_MASK,
be16_to_cpu(header->srm_version), header->srm_gen_no);
if (header->reserved)
return -EINVAL;
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
	/* Length of all the VRLs combined */
vrl_length -= (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_2_VRL_LENGTH_SIZE;
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return 0;
}
*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
ksv_sz = ksv_count * DRM_HDCP_KSV_LEN;
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
memcpy(*revoked_ksv_list, buf, ksv_sz);
*revoked_ksv_cnt = ksv_count;
return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_1_4_SRM_ID << 4);
}
static inline bool is_srm_version_hdcp2(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
static int drm_hdcp_srm_update(const u8 *buf, size_t count,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
return -EINVAL;
if (is_srm_version_hdcp1(buf))
return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
revoked_ksv_cnt);
else
return -EINVAL;
}
static int drm_hdcp_request_srm(struct drm_device *drm_dev,
u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
drm_dev->dev);
if (ret < 0) {
*revoked_ksv_cnt = 0;
*revoked_ksv_list = NULL;
ret = 0;
goto exit;
}
if (fw->size && fw->data)
ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
revoked_ksv_cnt);
exit:
release_firmware(fw);
return ret;
}
/**
* drm_hdcp_check_ksvs_revoked - Check the revoked status of the IDs
*
* @drm_dev: drm_device for which HDCP revocation check is requested
* @ksvs: List of KSVs (HDCP receiver IDs)
* @ksv_count: KSV count passed in through @ksvs
*
 * This function reads the HDCP System Renewability Message (SRM table)
 * from userspace as firmware and parses it for the revoked HDCP
 * KSVs (receiver IDs) identified by DCP LLC. Once the revoked KSVs are
 * known, the revoked state of each KSV in the list passed in by the
 * display driver is decided and returned.
*
* SRM should be presented in the name of "display_hdcp_srm.bin".
*
* Format of the SRM table, that userspace needs to write into the binary file,
* is defined at:
* 1. Renewability chapter on 55th page of HDCP 1.4 specification
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20Specification%20Rev1_4_Secure.pdf
* 2. Renewability chapter on 63rd page of HDCP 2.2 specification
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
 * Count of the revoked KSVs, or a negative error code on failure.
*/
int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
u32 ksv_count)
{
u32 revoked_ksv_cnt = 0, i, j;
u8 *revoked_ksv_list = NULL;
int ret = 0;
ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
&revoked_ksv_cnt);
if (ret)
return ret;
/* revoked_ksv_cnt will be zero when above function failed */
for (i = 0; i < revoked_ksv_cnt; i++)
for (j = 0; j < ksv_count; j++)
if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
&revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
DRM_HDCP_KSV_LEN)) {
DRM_DEBUG("Revoked KSV is ");
drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
ret++;
}
kfree(revoked_ksv_list);
return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
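/*
 * Illustrative usage sketch (not part of the original file): a driver
 * checking a KSV list read from a downstream HDCP repeater. "ksv_fifo" and
 * "device_count" are hypothetical names for data read over DDC.
 *
 *	ret = drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, device_count);
 *	if (ret != 0)
 *		return -EPERM;	(revoked KSV found, or SRM error)
 */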
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
{ DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
static struct drm_prop_enum_list drm_hdcp_content_type_enum_list[] = {
{ DRM_MODE_HDCP_CONTENT_TYPE0, "HDCP Type0" },
{ DRM_MODE_HDCP_CONTENT_TYPE1, "HDCP Type1" },
};
DRM_ENUM_NAME_FN(drm_get_hdcp_content_type_name,
drm_hdcp_content_type_enum_list)
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
* @hdcp_content_type: is HDCP Content Type property needed for connector
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
 * technologies, however it is most commonly implemented by HDCP.
 *
 * When @hdcp_content_type is true, an enum property called "HDCP Content Type"
 * is created (if it does not exist already) and attached to the connector.
 *
 * This property is used for sending the protected content's stream type
 * from userspace to the kernel on selected connectors. The protected content
 * provider decides the type of its content and declares it to the kernel.
*
* Content type will be used during the HDCP 2.2 authentication.
* Content type will be set to &drm_connector_state.hdcp_content_type.
*
 * The content protection state will be set to &drm_connector_state.content_protection.
 *
 * For kernel-triggered content protection state changes such as DESIRED->ENABLED
 * and ENABLED->DESIRED, drivers use drm_hdcp_update_content_protection() to
 * update the content protection state of a connector.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
struct drm_connector *connector, bool hdcp_content_type)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop =
dev->mode_config.content_protection_property;
if (!prop)
prop = drm_property_create_enum(dev, 0, "Content Protection",
drm_cp_enum_list,
ARRAY_SIZE(drm_cp_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
dev->mode_config.content_protection_property = prop;
if (!hdcp_content_type)
return 0;
prop = dev->mode_config.hdcp_content_type_property;
if (!prop)
prop = drm_property_create_enum(dev, 0, "HDCP Content Type",
drm_hdcp_content_type_enum_list,
ARRAY_SIZE(
drm_hdcp_content_type_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_HDCP_CONTENT_TYPE0);
dev->mode_config.hdcp_content_type_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
/**
* drm_hdcp_update_content_protection - Updates the content protection state
* of a connector
*
* @connector: drm_connector on which content protection state needs an update
* @val: New state of the content protection property
*
 * This function can be used by display drivers to handle kernel-triggered
 * content protection state changes of a drm_connector, such as DESIRED->ENABLED
 * and ENABLED->DESIRED. No uevent is generated for DESIRED->UNDESIRED or
 * ENABLED->UNDESIRED, as userspace triggers such state changes and the kernel
 * performs them without fail. This function writes the new state of the
 * property into the connector's state and generates a uevent to notify
 * userspace.
*/
void drm_hdcp_update_content_protection(struct drm_connector *connector,
u64 val)
{
struct drm_device *dev = connector->dev;
struct drm_connector_state *state = connector->state;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (state->content_protection == val)
return;
state->content_protection = val;
drm_sysfs_connector_status_event(connector,
dev->mode_config.content_protection_property);
}
EXPORT_SYMBOL(drm_hdcp_update_content_protection);

View File

@ -0,0 +1,199 @@
// SPDX-License-Identifier: MIT
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/drm_property.h>
static inline bool is_eotf_supported(u8 output_eotf, u8 sink_eotf)
{
return sink_eotf & BIT(output_eotf);
}
/**
* drm_hdmi_infoframe_set_hdr_metadata() - fill an HDMI DRM infoframe with
* HDR metadata from userspace
* @frame: HDMI DRM infoframe
* @conn_state: Connector state containing HDR metadata
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
const struct drm_connector_state *conn_state)
{
struct drm_connector *connector;
struct hdr_output_metadata *hdr_metadata;
int err;
if (!frame || !conn_state)
return -EINVAL;
connector = conn_state->connector;
if (!conn_state->hdr_output_metadata)
return -EINVAL;
hdr_metadata = conn_state->hdr_output_metadata->data;
if (!hdr_metadata || !connector)
return -EINVAL;
	/* Sink EOTF is a bitmap while the infoframe carries absolute values */
if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
connector->hdr_sink_metadata.hdmi_type1.eotf)) {
DRM_DEBUG_KMS("EOTF Not Supported\n");
return -EINVAL;
}
err = hdmi_drm_infoframe_init(frame);
if (err < 0)
return err;
frame->eotf = hdr_metadata->hdmi_metadata_type1.eotf;
frame->metadata_type = hdr_metadata->hdmi_metadata_type1.metadata_type;
BUILD_BUG_ON(sizeof(frame->display_primaries) !=
sizeof(hdr_metadata->hdmi_metadata_type1.display_primaries));
BUILD_BUG_ON(sizeof(frame->white_point) !=
sizeof(hdr_metadata->hdmi_metadata_type1.white_point));
memcpy(&frame->display_primaries,
&hdr_metadata->hdmi_metadata_type1.display_primaries,
sizeof(frame->display_primaries));
memcpy(&frame->white_point,
&hdr_metadata->hdmi_metadata_type1.white_point,
sizeof(frame->white_point));
frame->max_display_mastering_luminance =
hdr_metadata->hdmi_metadata_type1.max_display_mastering_luminance;
frame->min_display_mastering_luminance =
hdr_metadata->hdmi_metadata_type1.min_display_mastering_luminance;
frame->max_fall = hdr_metadata->hdmi_metadata_type1.max_fall;
frame->max_cll = hdr_metadata->hdmi_metadata_type1.max_cll;
return 0;
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
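/*
 * Illustrative usage sketch (not part of the original file): an HDMI
 * encoder building and packing the DRM (HDR) infoframe during atomic
 * enable; error handling is abbreviated and the packed bytes would then
 * be written to hardware-specific infoframe registers.
 *
 *	struct hdmi_drm_infoframe frame;
 *	u8 buf[HDMI_INFOFRAME_SIZE(DRM)];
 *	int ret;
 *
 *	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
 *	if (ret)
 *		return ret;
 *	ret = hdmi_drm_infoframe_pack(&frame, buf, sizeof(buf));
 */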
/* HDMI Colorspace Spec Definitions */
#define FULL_COLORIMETRY_MASK 0x1FF
#define NORMAL_COLORIMETRY_MASK 0x3
#define EXTENDED_COLORIMETRY_MASK 0x7
#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
#define C(x) ((x) << 0)
#define EC(x) ((x) << 2)
#define ACE(x) ((x) << 5)
#define HDMI_COLORIMETRY_NO_DATA 0x0
#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
static const u32 hdmi_colorimetry_val[] = {
[DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
[DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
[DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
[DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
[DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
[DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
[DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
[DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
[DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
[DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
[DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
};
#undef C
#undef EC
#undef ACE
/**
* drm_hdmi_avi_infoframe_colorimetry() - fill the HDMI AVI infoframe
* colorimetry information
* @frame: HDMI AVI infoframe
* @conn_state: connector state
*/
void drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
u32 colorimetry_val;
u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
else
colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
/*
* ToDo: Extend it for ACE formats as well. Modify the infoframe
* structure and extend it in drivers/video/hdmi
*/
frame->extended_colorimetry = (colorimetry_val >> 2) &
EXTENDED_COLORIMETRY_MASK;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorimetry);
/**
* drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
* bar information
* @frame: HDMI AVI infoframe
* @conn_state: connector state
*/
void drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
frame->right_bar = conn_state->tv.margins.right;
frame->left_bar = conn_state->tv.margins.left;
frame->top_bar = conn_state->tv.margins.top;
frame->bottom_bar = conn_state->tv.margins.bottom;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
/**
* drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
* content type information, based
* on correspondent DRM property.
* @frame: HDMI AVI infoframe
* @conn_state: DRM display connector state
*
*/
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state)
{
switch (conn_state->content_type) {
case DRM_MODE_CONTENT_TYPE_GRAPHICS:
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
break;
case DRM_MODE_CONTENT_TYPE_CINEMA:
frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
break;
case DRM_MODE_CONTENT_TYPE_GAME:
frame->content_type = HDMI_CONTENT_TYPE_GAME;
break;
case DRM_MODE_CONTENT_TYPE_PHOTO:
frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
break;
default:
/* Graphics is the default(0) */
frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
}
frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
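/*
 * Illustrative usage sketch (not part of the original file): these helpers
 * are typically called together after initialising the AVI infoframe from
 * the display mode; "connector", "mode" and "conn_state" are assumed to
 * come from the caller's atomic state.
 *
 *	struct hdmi_avi_infoframe frame;
 *
 *	drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
 *	drm_hdmi_avi_infoframe_colorimetry(&frame, conn_state);
 *	drm_hdmi_avi_infoframe_bars(&frame, conn_state);
 *	drm_hdmi_avi_infoframe_content_type(&frame, conn_state);
 */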

View File

@ -0,0 +1,250 @@
/*
* Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_print.h>
/**
* DOC: scdc helpers
*
* Status and Control Data Channel (SCDC) is a mechanism introduced by the
* HDMI 2.0 specification. It is a point-to-point protocol that allows the
* HDMI source and HDMI sink to exchange data. The same I2C interface that
* is used to access EDID serves as the transport mechanism for SCDC.
*/
#define SCDC_I2C_SLAVE_ADDRESS 0x54
/**
* drm_scdc_read - read a block of data from SCDC
* @adapter: I2C controller
* @offset: start offset of block to read
* @buffer: return location for the block to read
* @size: size of the block to read
*
* Reads a block of data from SCDC, starting at a given offset.
*
* Returns:
* 0 on success, negative error code on failure.
*/
ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
size_t size)
{
int ret;
struct i2c_msg msgs[2] = {
{
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
.buf = &offset,
}, {
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = I2C_M_RD,
.len = size,
.buf = buffer,
}
};
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_scdc_read);
/**
* drm_scdc_write - write a block of data to SCDC
* @adapter: I2C controller
* @offset: start offset of block to write
* @buffer: block of data to write
* @size: size of the block to write
*
* Writes a block of data to SCDC, starting at a given offset.
*
* Returns:
* 0 on success, negative error code on failure.
*/
ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
const void *buffer, size_t size)
{
struct i2c_msg msg = {
.addr = SCDC_I2C_SLAVE_ADDRESS,
.flags = 0,
.len = 1 + size,
.buf = NULL,
};
void *data;
int err;
data = kmalloc(1 + size, GFP_KERNEL);
if (!data)
return -ENOMEM;
msg.buf = data;
memcpy(data, &offset, sizeof(offset));
memcpy(data + 1, buffer, size);
err = i2c_transfer(adapter, &msg, 1);
kfree(data);
if (err < 0)
return err;
if (err != 1)
return -EPROTO;
return 0;
}
EXPORT_SYMBOL(drm_scdc_write);
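/*
 * Illustrative note (not part of the original file): single-byte accesses
 * commonly go through the drm_scdc_readb()/drm_scdc_writeb() wrappers
 * declared in drm_scdc_helper.h, which are thin wrappers around the block
 * helpers above, e.g.:
 *
 *	u8 version;
 *	int ret = drm_scdc_readb(adapter, SCDC_SINK_VERSION, &version);
 */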
/**
 * drm_scdc_get_scrambling_status - read the current scrambling status
* @adapter: I2C adapter for DDC channel
*
* Reads the scrambler status over SCDC, and checks the
* scrambling status.
*
* Returns:
* True if the scrambling is enabled, false otherwise.
*/
bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
{
u8 status;
int ret;
ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to read scrambling status: %d\n", ret);
return false;
}
return status & SCDC_SCRAMBLING_STATUS;
}
EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
/**
 * drm_scdc_set_scrambling - enable or disable scrambling
* @adapter: I2C adapter for DDC channel
* @enable: bool to indicate if scrambling is to be enabled/disabled
*
* Writes the TMDS config register over SCDC channel, and:
* enables scrambling when enable = 1
* disables scrambling when enable = 0
*
* Returns:
* True if scrambling is set/reset successfully, false otherwise.
*/
bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
{
u8 config;
int ret;
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
return false;
}
if (enable)
config |= SCDC_SCRAMBLING_ENABLE;
else
config &= ~SCDC_SCRAMBLING_ENABLE;
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to enable scrambling: %d\n", ret);
return false;
}
return true;
}
EXPORT_SYMBOL(drm_scdc_set_scrambling);
/**
* drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio
* @adapter: I2C adapter for DDC channel
 * @set: set or reset the high clock ratio
 *
* TMDS clock ratio calculations go like this:
* TMDS character = 10 bit TMDS encoded value
*
* TMDS character rate = The rate at which TMDS characters are
* transmitted (Mcsc)
*
* TMDS bit rate = 10x TMDS character rate
*
* As per the spec:
* TMDS clock rate for pixel clock < 340 MHz = 1x the character
* rate = 1/10 pixel clock rate
*
* TMDS clock rate for pixel clock > 340 MHz = 0.25x the character
* rate = 1/40 pixel clock rate
*
* Writes to the TMDS config register over SCDC channel, and:
* sets TMDS clock ratio to 1/40 when set = 1
*
* sets TMDS clock ratio to 1/10 when set = 0
*
* Returns:
* True if write is successful, false otherwise.
*/
bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
{
u8 config;
int ret;
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
return false;
}
if (set)
config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
else
config &= ~SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to set TMDS clock ratio: %d\n", ret);
return false;
}
/*
 * The spec says that a source should wait a minimum of 1 ms and a maximum
 * of 100 ms after writing the TMDS config for the clock ratio. Let's allow
 * a wait of up to 2 ms here.
*/
usleep_range(1000, 2000);
return true;
}
EXPORT_SYMBOL(drm_scdc_set_high_tmds_clock_ratio);
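/*
 * Illustrative usage sketch (not part of the original file): an HDMI 2.0
 * source driving a TMDS character rate above 340 MHz typically sets the
 * 1/40 clock ratio and scrambling together, then verifies the sink:
 *
 *	drm_scdc_set_high_tmds_clock_ratio(adapter, true);
 *	drm_scdc_set_scrambling(adapter, true);
 *	if (!drm_scdc_get_scrambling_status(adapter))
 *		(sink did not enable scrambling; handle the failure)
 */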

View File

@ -0,0 +1,160 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_AUDIO_REGS_H__
#define __INTEL_AUDIO_REGS_H__
#include "i915_reg_defs.h"
#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
#define G4X_ELDV_DEVCTG (1 << 14)
#define G4X_ELD_ADDR_MASK (0xf << 5)
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
#define _IBX_HDMIW_HDMIEDID_A 0xE2050
#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
_IBX_HDMIW_HDMIEDID_B)
#define _IBX_AUD_CNTL_ST_A 0xE20B4
#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
_IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
#define _CPT_HDMIW_HDMIEDID_A 0xE5050
#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
#define _CPT_AUD_CNTL_ST_A 0xE50B4
#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
#define _IBX_AUD_CONFIG_A 0xe2000
#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
#define _CPT_AUD_CONFIG_A 0xe5000
#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
#define AUD_CONFIG_N(n) \
(((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
(((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
#define _HSW_AUD_MISC_CTRL_A 0x65010
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
#define AUD_CONFIG_M_MASK 0xfffff
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
#define _HSW_AUD_DIG_CNVT_1 0x65080
#define _HSW_AUD_DIG_CNVT_2 0x65180
#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
#define _HSW_AUD_EDID_DATA_A 0x65050
#define _HSW_AUD_EDID_DATA_B 0x65150
#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
#define AUD_FREQ_CNTRL _MMIO(0x65900)
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
#define AUD_TS_CDCLK_M_EN REG_BIT(31)
#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
/* Display Audio Config Reg */
#define AUD_CONFIG_BE _MMIO(0x65ef0)
#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
#define HBLANK_START_COUNT_8 0
#define HBLANK_START_COUNT_16 1
#define HBLANK_START_COUNT_32 2
#define HBLANK_START_COUNT_64 3
#define HBLANK_START_COUNT_96 4
#define HBLANK_START_COUNT_128 5
#endif /* __INTEL_AUDIO_REGS_H__ */

View File

@ -0,0 +1,314 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vrr.h"
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
const struct drm_display_mode *mode)
{
drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
"type: 0x%x flags: 0x%x\n",
mode->crtc_clock,
mode->crtc_hdisplay, mode->crtc_hsync_start,
mode->crtc_hsync_end, mode->crtc_htotal,
mode->crtc_vdisplay, mode->crtc_vsync_start,
mode->crtc_vsync_end, mode->crtc_vtotal,
mode->type, mode->flags);
}
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
drm_dbg_kms(&i915->drm,
"%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
m_n->data_m, m_n->data_n,
m_n->link_m, m_n->link_n, m_n->tu);
}
static void
intel_dump_infoframe(struct drm_i915_private *i915,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
}
static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
const struct drm_dp_vsc_sdp *vsc)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
OUTPUT_TYPE(UNUSED),
OUTPUT_TYPE(ANALOG),
OUTPUT_TYPE(DVO),
OUTPUT_TYPE(SDVO),
OUTPUT_TYPE(LVDS),
OUTPUT_TYPE(TVOUT),
OUTPUT_TYPE(HDMI),
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
OUTPUT_TYPE(DDI),
OUTPUT_TYPE(DP_MST),
};
#undef OUTPUT_TYPE
static void snprintf_output_types(char *buf, size_t len,
unsigned int output_types)
{
char *str = buf;
int i;
str[0] = '\0';
for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
int r;
if ((output_types & BIT(i)) == 0)
continue;
r = snprintf(str, len, "%s%s",
str != buf ? "," : "", output_type_str[i]);
if (r >= len)
break;
str += r;
len -= r;
output_types &= ~BIT(i);
}
WARN_ON_ONCE(output_types != 0);
}
static const char * const output_format_str[] = {
[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
static const char *output_formats(enum intel_output_format format)
{
if (format >= ARRAY_SIZE(output_format_str))
return "invalid";
return output_format_str[format];
}
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fb) {
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
str_yes_no(plane_state->uapi.visible));
return;
}
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
plane_state->hw.rotation, plane_state->scaler_id);
if (plane_state->uapi.visible)
drm_dbg_kms(&i915->drm,
"\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
DRM_RECT_FP_ARG(&plane_state->uapi.src),
DRM_RECT_ARG(&plane_state->uapi.dst));
}
void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
char buf[64];
int i;
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
str_yes_no(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
drm_dbg_kms(&i915->drm,
"active: %s, output_types: %s (0x%x), output format: %s\n",
str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
drm_dbg_kms(&i915->drm,
"cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
transcoder_name(pipe_config->mst_master_transcoder));
drm_dbg_kms(&i915->drm,
"port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
transcoder_name(pipe_config->master_transcoder),
pipe_config->sync_mode_slaves_mask);
drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
pipe_config->bigjoiner_pipes);
drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
str_enabled_disabled(pipe_config->splitter.enable),
pipe_config->splitter.link_count,
pipe_config->splitter.pixel_overlap);
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count,
&pipe_config->dp_m_n);
intel_dump_m_n_config(pipe_config, "dp m2_n2",
pipe_config->lane_count,
&pipe_config->dp_m2_n2);
}
drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
drm_dbg_kms(&i915->drm,
"audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));
drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
drm_dbg_kms(&i915->drm,
"port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
pipe_config->pixel_rate);
drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
if (DISPLAY_VER(i915) >= 9)
drm_dbg_kms(&i915->drm,
"num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
if (HAS_GMCH(i915))
drm_dbg_kms(&i915->drm,
"gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
else
drm_dbg_kms(&i915->drm,
"pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
str_enabled_disabled(pipe_config->pch_pfit.enabled),
str_yes_no(pipe_config->pch_pfit.force_thru));
drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide,
pipe_config->has_drrs);
intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(i915))
drm_dbg_kms(&i915->drm,
"cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
else
drm_dbg_kms(&i915->drm,
"csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
drm_dbg_kms(&i915->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
pipe_config->hw.degamma_lut ?
drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
pipe_config->hw.gamma_lut ?
drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
dump_planes:
if (!state)
return;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe == crtc->pipe)
intel_dump_plane_state(plane_state);
}
}

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_CRTC_STATE_DUMP_H__
#define __INTEL_CRTC_STATE_DUMP_H__
struct intel_crtc_state;
struct intel_atomic_state;
void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
struct intel_atomic_state *state,
const char *context);
#endif /* __INTEL_CRTC_STATE_DUMP_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_DISPLAY_POWER_MAP_H__
#define __INTEL_DISPLAY_POWER_MAP_H__
struct i915_power_domains;
int intel_display_power_map_init(struct i915_power_domains *power_domains);
void intel_display_power_map_cleanup(struct i915_power_domains *power_domains);
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,173 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_DISPLAY_POWER_WELL_H__
#define __INTEL_DISPLAY_POWER_WELL_H__
#include <linux/types.h>
#include "intel_display.h"
#include "intel_display_power.h"
struct drm_i915_private;
struct i915_power_well;
#define for_each_power_well(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
(__power_well) - (__dev_priv)->power_domains.power_wells < \
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_reverse(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
(__power_well)--)
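/*
 * Illustrative usage sketch (not part of the original file): iterating all
 * power wells of a device, e.g. to sync their state with hardware.
 *
 *	struct i915_power_well *power_well;
 *
 *	for_each_power_well(dev_priv, power_well)
 *		intel_power_well_sync_hw(dev_priv, power_well);
 */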
/*
* i915_power_well_id:
*
* IDs used to look up power wells. Power wells accessed directly bypassing
* the power domains framework must be assigned a unique ID. The rest of power
* wells must be assigned DISP_PW_ID_NONE.
*/
enum i915_power_well_id {
DISP_PW_ID_NONE = 0, /* must be kept zero */
VLV_DISP_PW_DISP2D,
BXT_DISP_PW_DPIO_CMN_A,
VLV_DISP_PW_DPIO_CMN_BC,
GLK_DISP_PW_DPIO_CMN_C,
CHV_DISP_PW_DPIO_CMN_D,
HSW_DISP_PW_GLOBAL,
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
ICL_DISP_PW_3,
SKL_DISP_DC_OFF,
TGL_DISP_PW_TC_COLD_OFF,
};
struct i915_power_well_instance {
const char *name;
const struct i915_power_domain_list {
const enum intel_display_power_domain *list;
u8 count;
} *domain_list;
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
	 * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
struct {
/*
* request/status flag index in the PUNIT power well
* control/status registers.
*/
u8 idx;
} vlv;
struct {
enum dpio_phy phy;
} bxt;
struct {
/*
* request/status flag index in the power well
			 * control/status registers.
*/
u8 idx;
} hsw;
};
};
struct i915_power_well_desc {
const struct i915_power_well_ops *ops;
const struct i915_power_well_instance_list {
const struct i915_power_well_instance *list;
u8 count;
} *instances;
/* Mask of pipes whose IRQ logic is backed by the pw */
u16 irq_pipe_mask:4;
u16 always_on:1;
/*
* Instead of waiting for the status bit to ack enables,
* just wait a specific amount of time and then consider
* the well enabled.
*/
u16 fixed_enable_delay:1;
/* The pw is backing the VGA functionality */
u16 has_vga:1;
u16 has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
* Thunderbolt mode.
*/
u16 is_tc_tbt:1;
};
struct i915_power_well {
const struct i915_power_well_desc *desc;
struct intel_power_domain_mask domains;
/* power well enable/disable usage count */
int count;
/* cached hw enabled state */
bool hw_enabled;
/* index into desc->instances->list */
u8 instance_idx;
};
struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
enum i915_power_well_id id);
void intel_power_well_enable(struct drm_i915_private *i915,
struct i915_power_well *power_well);
void intel_power_well_disable(struct drm_i915_private *i915,
struct i915_power_well *power_well);
void intel_power_well_sync_hw(struct drm_i915_private *i915,
struct i915_power_well *power_well);
void intel_power_well_get(struct drm_i915_private *i915,
struct i915_power_well *power_well);
void intel_power_well_put(struct drm_i915_private *i915,
struct i915_power_well *power_well);
bool intel_power_well_is_enabled(struct drm_i915_private *i915,
struct i915_power_well *power_well);
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
bool intel_power_well_is_always_on(struct i915_power_well *power_well);
const char *intel_power_well_name(struct i915_power_well *power_well);
struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well);
int intel_power_well_refcount(struct i915_power_well *power_well);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
extern const struct i915_power_well_ops chv_pipe_power_well_ops;
extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops;
extern const struct i915_power_well_ops i830_pipes_power_well_ops;
extern const struct i915_power_well_ops hsw_power_well_ops;
extern const struct i915_power_well_ops gen9_dc_off_power_well_ops;
extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops;
extern const struct i915_power_well_ops vlv_display_power_well_ops;
extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops;
extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
extern const struct i915_power_well_ops icl_aux_power_well_ops;
extern const struct i915_power_well_ops icl_ddi_power_well_ops;
extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
#endif

View File

@ -0,0 +1,87 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_DMC_REGS_H__
#define __INTEL_DMC_REGS_H__
#include "i915_reg_defs.h"
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
#define __PIPEDMC_REG_MMIO_BASE(i915, dmc_id) \
((DISPLAY_VER(i915) >= 13 ? _ADLP_PIPEDMC_REG_MMIO_BASE_A : \
_TGL_PIPEDMC_REG_MMIO_BASE_A) + \
0x400 * ((dmc_id) - 1))
#define __DMC_REG_MMIO_BASE 0x8f000
#define _DMC_REG_MMIO_BASE(i915, dmc_id) \
((dmc_id) == DMC_FW_MAIN ? __DMC_REG_MMIO_BASE : \
__PIPEDMC_REG_MMIO_BASE(i915, dmc_id))
#define _DMC_REG(i915, dmc_id, reg) \
((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
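/*
* Worked example, derived from the macros above: for the main DMC
* (dmc_id == DMC_FW_MAIN) a register keeps its native offset, e.g.
* _DMC_REG(i915, DMC_FW_MAIN, 0x8f004) == 0x8f004, while for the first
* pipe DMC (dmc_id == 1) on a DISPLAY_VER >= 13 platform the same
* register maps to 0x8f004 - 0x8f000 + 0x5f000 == 0x5f004.
*/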
#define _DMC_EVT_HTP_0 0x8f004
#define DMC_EVT_HTP(i915, dmc_id, handler) \
_MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_HTP_0) + 4 * (handler))
#define _DMC_EVT_CTL_0 0x8f034
#define DMC_EVT_CTL(i915, dmc_id, handler) \
_MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_CTL_0) + 4 * (handler))
#define DMC_EVT_CTL_ENABLE REG_BIT(31)
#define DMC_EVT_CTL_RECURRING REG_BIT(30)
#define DMC_EVT_CTL_TYPE_MASK REG_GENMASK(17, 16)
#define DMC_EVT_CTL_TYPE_LEVEL_0 0
#define DMC_EVT_CTL_TYPE_LEVEL_1 1
#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
/* An event handler scheduled to run at a 1 kHz frequency. */
#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
#define DMC_HTP_SKL _MMIO(0x8F004)
#define DMC_LAST_WRITE _MMIO(0x8F034)
#define DMC_LAST_WRITE_VALUE 0xc003b400
#define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF
#define DMC_V1_MMIO_START_RANGE 0x80000
#define TGL_MAIN_MMIO_START 0x8F000
#define TGL_MAIN_MMIO_END 0x8FFFF
#define _TGL_PIPEA_MMIO_START 0x92000
#define _TGL_PIPEA_MMIO_END 0x93FFF
#define _TGL_PIPEB_MMIO_START 0x96000
#define _TGL_PIPEB_MMIO_END 0x97FFF
#define ADLP_PIPE_MMIO_START 0x5F000
#define ADLP_PIPE_MMIO_END 0x5FFFF
#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
_TGL_PIPEB_MMIO_START)
#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
_TGL_PIPEB_MMIO_END)
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
#define TGL_DMC_DEBUG3 _MMIO(0x101090)
#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
#endif /* __INTEL_DMC_REGS_H__ */

View File

@ -0,0 +1,734 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*
* Read out the current hardware modeset state, and sanitize it to the current
* state.
*/
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
#include "intel_pm.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(i915->bw_obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
struct drm_atomic_state *state;
struct intel_crtc_state *temp_crtc_state;
enum pipe pipe = crtc->pipe;
int ret;
if (!crtc_state->hw.active)
return;
for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
}
state = drm_atomic_state_alloc(&i915->drm);
if (!state) {
drm_dbg_kms(&i915->drm,
"failed to disable [CRTC:%d:%s], out of memory",
crtc->base.base.id, crtc->base.name);
return;
}
state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
ret = drm_atomic_add_affected_connectors(state, &crtc->base);
drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.base.id, crtc->base.name);
crtc->active = false;
crtc->base.enabled = false;
drm_WARN_ON(&i915->drm,
drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
crtc_state->uapi.active = false;
crtc_state->uapi.connector_mask = 0;
crtc_state->uapi.encoder_mask = 0;
intel_crtc_free_hw_state(crtc_state);
memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder)
encoder->base.crtc = NULL;
intel_fbc_disable(crtc);
intel_update_watermarks(i915);
intel_disable_shared_dpll(crtc_state);
intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
cdclk_state->min_cdclk[pipe] = 0;
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
dbuf_state->active_pipes &= ~BIT(pipe);
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
}
static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state = connector->base.state;
struct intel_encoder *encoder =
to_intel_encoder(connector->base.encoder);
if (conn_state->crtc)
drm_connector_put(&connector->base);
if (encoder) {
struct intel_crtc *crtc =
to_intel_crtc(encoder->base.crtc);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
conn_state->best_encoder = &encoder->base;
conn_state->crtc = &crtc->base;
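/* pipe_bpp covers all three channels, so a 24 bpp pipe yields 8 bpc */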
conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
drm_connector_get(&connector->base);
} else {
conn_state->best_encoder = NULL;
conn_state->crtc = NULL;
}
}
drm_connector_list_iter_end(&conn_iter);
}
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
if (intel_crtc_is_bigjoiner_slave(crtc_state))
return;
crtc_state->uapi.enable = crtc_state->hw.enable;
crtc_state->uapi.active = crtc_state->hw.active;
drm_WARN_ON(crtc_state->uapi.crtc->dev,
drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
crtc_state->hw.degamma_lut);
drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
crtc_state->hw.gamma_lut);
drm_property_replace_blob(&crtc_state->uapi.ctm,
crtc_state->hw.ctm);
}
static void
intel_sanitize_plane_mapping(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
if (DISPLAY_VER(i915) >= 4)
return;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_crtc *plane_crtc;
enum pipe pipe;
if (!plane->get_hw_state(plane, &pipe))
continue;
if (pipe == crtc->pipe)
continue;
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
plane_crtc = intel_crtc_for_pipe(i915, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
}
}
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
return true;
return false;
}
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_connector *connector;
for_each_connector_on_encoder(dev, &encoder->base, connector)
return connector;
return NULL;
}
static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!crtc_state->hw.active && !HAS_GMCH(i915))
return;
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
*
* Also, on gmch platforms we don't have any hardware bits to
* disable underrun reporting, which means we need to start
* out with underrun reporting disabled on inactive pipes too,
* since otherwise we'll complain about the garbage we read when
* e.g. coming up after runtime pm.
*
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
crtc->cpu_fifo_underrun_disabled = true;
/*
* We track the PCH transcoder underrun reporting state
* within the crtc. With crtc for pipe A housing the underrun
* reporting state for PCH transcoder A, crtc for pipe B housing
* it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
* and marking underrun reporting as disabled for the non-existing
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
if (intel_has_pch_trancoder(i915, crtc->pipe))
crtc->pch_fifo_underrun_disabled = true;
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
if (crtc_state->hw.active) {
struct intel_plane *plane;
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
if (plane_state->uapi.visible &&
plane->base.type != DRM_PLANE_TYPE_PRIMARY)
intel_plane_disable_noatomic(crtc, plane);
}
/* Disable any background color/etc. set by the BIOS */
intel_color_commit_noarm(crtc_state);
intel_color_commit_arm(crtc_state);
}
/*
* Adjust the state of the output pipe according to whether we have
* active connectors/encoders.
*/
if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
!intel_crtc_is_bigjoiner_slave(crtc_state))
intel_crtc_disable_noatomic(crtc, ctx);
}
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/*
* Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
* the hardware when a high-res display is plugged in. The DPLL P
* divider is zero, and the pipe timings are bonkers. We'll
* try to disable everything in that case.
*
* FIXME would be nice to be able to sanitize this state
* without several WARNs, but for now let's take the easy
* road.
*/
return IS_SANDYBRIDGE(i915) &&
crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
/*
* We need to check both for a crtc link (meaning that the encoder is
* active and trying to read from a pipe) and the pipe itself being
* active.
*/
bool has_active_crtc = crtc_state &&
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
drm_dbg_kms(&i915->drm,
"BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
}
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
/*
* Connector is active, but has no active pipe. This is fallout
* from our resume register restoring. Disable the encoder
* manually again.
*/
if (crtc_state) {
struct drm_encoder *best_encoder;
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
/* avoid oopsing in case the hooks consult best_encoder */
best_encoder = connector->base.state->best_encoder;
connector->base.state->best_encoder = &encoder->base;
/* FIXME NULL atomic state passed! */
if (encoder->disable)
encoder->disable(NULL, encoder, crtc_state,
connector->base.state);
if (encoder->post_disable)
encoder->post_disable(NULL, encoder, crtc_state,
connector->base.state);
connector->base.state->best_encoder = best_encoder;
}
encoder->base.crtc = NULL;
/*
* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default.
*/
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
if (HAS_DDI(i915))
intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *i915)
{
struct intel_plane *plane;
struct intel_crtc *crtc;
for_each_intel_plane(&i915->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state;
enum pipe pipe = PIPE_A;
bool visible;
visible = plane->get_hw_state(plane, &pipe);
crtc = intel_crtc_for_pipe(i915, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
str_enabled_disabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
intel_plane_fixup_bitmasks(crtc_state);
}
}
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 active_pipes = 0;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
intel_crtc_state_reset(crtc_state, crtc);
intel_crtc_get_pipe_config(crtc_state);
crtc_state->hw.enable = crtc_state->hw.active;
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
if (crtc_state->hw.active)
active_pipes |= BIT(crtc->pipe);
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
cdclk_state->active_pipes = active_pipes;
dbuf_state->active_pipes = active_pipes;
readout_plane_state(i915);
for_each_intel_encoder(&i915->drm, encoder) {
struct intel_crtc_state *crtc_state = NULL;
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = intel_crtc_for_pipe(i915, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
intel_encoder_get_config(encoder, crtc_state);
/* read out to slave crtc as well for bigjoiner */
if (crtc_state->bigjoiner_pipes) {
struct intel_crtc *slave_crtc;
/* encoder readout should be linked to the bigjoiner master */
WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
struct intel_crtc_state *slave_crtc_state;
slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
intel_encoder_get_config(encoder, slave_crtc_state);
}
}
} else {
encoder->base.crtc = NULL;
}
if (encoder->sync_state)
encoder->sync_state(encoder, crtc_state);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
str_enabled_disabled(encoder->base.crtc),
pipe_name(pipe));
}
intel_dpll_readout_hw_state(i915);
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
connector->base.dpms = DRM_MODE_DPMS_ON;
encoder = intel_attached_encoder(connector);
connector->base.encoder = &encoder->base;
crtc = to_intel_crtc(encoder->base.crtc);
crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
if (crtc_state && crtc_state->hw.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
crtc_state->uapi.connector_mask |=
drm_connector_mask(&connector->base);
crtc_state->uapi.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_bw_state *bw_state =
to_intel_bw_state(i915->bw_obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
int min_cdclk = 0;
if (crtc_state->hw.active) {
/*
* The initial mode needs to be set in order to keep
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
* But we don't set all the derived state fully, hence
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
crtc_state->inherited = true;
intel_crtc_update_active_timings(crtc_state);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
/*
* FIXME don't have the fb yet, so can't
* use intel_plane_data_rate() :(
*/
if (plane_state->uapi.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
/*
* FIXME don't have the fb yet, so can't
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
crtc_state->min_cdclk[plane->id]);
}
if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
min_cdclk = 0;
}
cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
intel_bw_crtc_update(bw_state, crtc_state);
}
}
static void
get_encoder_power_domains(struct drm_i915_private *i915)
{
struct intel_encoder *encoder;
for_each_intel_encoder(&i915->drm, encoder) {
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
continue;
/*
* MST-primary and inactive encoders don't have a crtc state
* and neither of these require any power domain references.
*/
if (!encoder->base.crtc)
continue;
crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
encoder->get_power_domains(encoder, crtc_state);
}
}
static void intel_early_display_was(struct drm_i915_private *i915)
{
/*
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
if (IS_DISPLAY_VER(i915, 10, 12))
intel_de_write(i915, GEN9_CLKGATE_DIS_0,
intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
if (IS_HASWELL(i915)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
intel_de_write(i915, CHICKEN_PAR1_1,
intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
/* Display WA #1142:kbl,cfl,cml */
intel_de_rmw(i915, CHICKEN_PAR1_1,
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
intel_de_rmw(i915, CHICKEN_MISC_2,
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
KBL_ARB_FILL_SPARE_14);
}
}
void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_early_display_was(i915);
intel_modeset_readout_hw_state(i915);
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(i915);
intel_pch_sanitize(i915);
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
*/
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
intel_sanitize_fifo_underrun_reporting(crtc_state);
drm_crtc_vblank_reset(&crtc->base);
if (crtc_state->hw.active)
intel_crtc_vblank_on(crtc_state);
}
intel_fbc_sanitize(i915);
intel_sanitize_plane_mapping(i915);
for_each_intel_encoder(&i915->drm, encoder)
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state");
}
intel_modeset_update_connector_atomic_state(i915);
intel_dpll_sanitize_state(i915);
if (IS_G4X(i915)) {
g4x_wm_get_hw_state(i915);
g4x_wm_sanitize(i915);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_wm_get_hw_state(i915);
vlv_wm_sanitize(i915);
} else if (DISPLAY_VER(i915) >= 9) {
skl_wm_get_hw_state(i915);
skl_wm_sanitize(i915);
} else if (HAS_PCH_SPLIT(i915)) {
ilk_wm_get_hw_state(i915);
}
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_power_domain_mask put_domains;
intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_sanitize_state(i915);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_MODESET_SETUP_H__
#define __INTEL_MODESET_SETUP_H__
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
struct drm_modeset_acquire_ctx *ctx);
#endif /* __INTEL_MODESET_SETUP_H__ */

View File

@ -0,0 +1,246 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*
* High level crtc/connector/encoder modeset state verification.
*/
#include <drm/drm_atomic_state_helper.h>
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
#include "intel_pm.h"
#include "intel_snps_phy.h"
/*
* Cross check the actual hw state with our own modeset state tracking (and its
* internal consistency).
*/
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = intel_attached_encoder(connector);
I915_STATE_WARN(!crtc_state,
"connector enabled without attached crtc\n");
if (!crtc_state)
return;
I915_STATE_WARN(!crtc_state->hw.active,
"connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
"atomic encoder doesn't match attached encoder\n");
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
I915_STATE_WARN(crtc_state && crtc_state->hw.active,
"attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
}
}
static void
verify_connector_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct intel_crtc_state *crtc_state = NULL;
if (new_conn_state->crtc != &crtc->base)
continue;
if (crtc)
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
intel_connector_verify_state(crtc_state, new_conn_state);
I915_STATE_WARN(new_conn_state->best_encoder != encoder,
"connector's atomic encoder doesn't match legacy encoder\n");
}
}
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *pipe_config)
{
if (pipe_config->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
drm_WARN(&dev_priv->drm,
!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
}
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
for_each_intel_encoder(&dev_priv->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
if (old_conn_state->best_encoder == &encoder->base)
found = true;
if (new_conn_state->best_encoder != &encoder->base)
continue;
found = true;
enabled = true;
I915_STATE_WARN(new_conn_state->crtc !=
encoder->base.crtc,
"connector's crtc doesn't match encoder crtc\n");
}
if (!found)
continue;
I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch (expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
if (!encoder->base.crtc) {
bool active;
active = encoder->get_hw_state(encoder, &pipe);
I915_STATE_WARN(active,
"encoder detached but still enabled on pipe %c.\n",
pipe_name(pipe));
}
}
}
static void
verify_crtc_state(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_crtc_state *pipe_config = old_crtc_state;
struct drm_atomic_state *state = old_crtc_state->uapi.state;
struct intel_crtc *master_crtc;
__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
intel_crtc_free_hw_state(old_crtc_state);
intel_crtc_state_reset(old_crtc_state, crtc);
old_crtc_state->uapi.state = state;
drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
crtc->base.name);
pipe_config->hw.enable = new_crtc_state->hw.enable;
intel_crtc_get_pipe_config(pipe_config);
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv) && pipe_config->hw.active)
pipe_config->hw.active = new_crtc_state->hw.active;
I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
"crtc active state doesn't match with hw state (expected %i, found %i)\n",
new_crtc_state->hw.active, pipe_config->hw.active);
I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
"transitional active state does not match atomic hw state (expected %i, found %i)\n",
new_crtc_state->hw.active, crtc->active);
master_crtc = intel_master_crtc(new_crtc_state);
for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
enum pipe pipe;
bool active;
active = encoder->get_hw_state(encoder, &pipe);
I915_STATE_WARN(active != new_crtc_state->hw.active,
"[ENCODER:%i] active %i with crtc active %i\n",
encoder->base.base.id, active,
new_crtc_state->hw.active);
I915_STATE_WARN(active && master_crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
if (active)
intel_encoder_get_config(encoder, pipe_config);
}
if (!new_crtc_state->hw.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
if (!intel_pipe_config_compare(new_crtc_state,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_crtc_state_dump(pipe_config, NULL, "hw state");
intel_crtc_state_dump(new_crtc_state, NULL, "sw state");
}
}
void intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
intel_wm_state_verify(crtc, new_crtc_state);
verify_connector_state(state, crtc);
verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
intel_mpllb_state_verify(state, new_crtc_state);
}
void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state)
{
verify_encoder_state(dev_priv, state);
verify_connector_state(state, NULL);
intel_shared_dpll_verify_disabled(dev_priv);
}

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_MODESET_VERIFY_H__
#define __INTEL_MODESET_VERIFY_H__
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
void intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state);
void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
#endif /* __INTEL_MODESET_VERIFY_H__ */

View File

@ -0,0 +1,132 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "intel_ggtt_gmch.h"
#include <drm/intel-gtt.h>
#include <drm/i915_drm.h>
#include <linux/agp_backend.h>
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_gtt.h"
#include "intel_gt_regs.h"
#include "intel_gt.h"
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
flags);
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
intel_gmch_gtt_flush();
}
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
intel_gmch_remove();
}
/*
* Certain Gen5 chipsets require idling the GPU before unmapping anything from
* the GTT when VT-d is enabled.
*/
static bool needs_idle_maps(struct drm_i915_private *i915)
{
/*
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if (!i915_vtd_active(i915))
return false;
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
return true;
return false;
}
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
}
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
"Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
ggtt->do_idle_maps = true;
}
ggtt->vm.insert_page = gmch_ggtt_insert_page;
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
return 0;
}
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
{
if (!intel_gmch_enable_gtt())
return -EIO;
return 0;
}
void intel_ggtt_gmch_flush(void)
{
intel_gmch_gtt_flush();
}

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_GGTT_GMCH_H__
#define __INTEL_GGTT_GMCH_H__
#include "intel_gtt.h"
/* For x86 platforms */
#if IS_ENABLED(CONFIG_X86)
void intel_ggtt_gmch_flush(void);
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915);
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt);
/* Stubs for non-x86 platforms */
#else
static inline void intel_ggtt_gmch_flush(void) { }
static inline int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915) { return -ENODEV; }
static inline int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt) { return -ENODEV; }
#endif
#endif /* __INTEL_GGTT_GMCH_H__ */

View File

@ -0,0 +1,224 @@
// SPDX-License-Identifier: MIT
/*
* Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
*/
#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
#define GSC_BAR_LENGTH 0x00000FFC
static void gsc_irq_mask(struct irq_data *d)
{
/* generic irq handling */
}
static void gsc_irq_unmask(struct irq_data *d)
{
/* generic irq handling */
}
static struct irq_chip gsc_irq_chip = {
.name = "gsc_irq_chip",
.irq_mask = gsc_irq_mask,
.irq_unmask = gsc_irq_unmask,
};
static int gsc_irq_init(int irq)
{
irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
handle_simple_irq, "gsc_irq_handler");
return irq_set_chip_data(irq, NULL);
}
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
{
/* HECI1 not yet implemented. */
},
{
.name = "mei-gscfi",
.bar = DG1_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
}
};
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
},
{
.name = "mei-gscfi",
.bar = DG2_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
}
};
static void gsc_release_dev(struct device *dev)
{
struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
kfree(adev);
}
static void gsc_destroy_one(struct intel_gsc_intf *intf)
{
if (intf->adev) {
auxiliary_device_delete(&intf->adev->aux_dev);
auxiliary_device_uninit(&intf->adev->aux_dev);
intf->adev = NULL;
}
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
}
static void gsc_init_one(struct drm_i915_private *i915,
struct intel_gsc_intf *intf,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
int ret;
intf->irq = -1;
intf->id = intf_id;
if (intf_id == 0 && !HAS_HECI_PXP(i915))
return;
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
drm_warn_once(&i915->drm, "Unknown platform\n");
return;
}
if (!def->name) {
drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
return;
}
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
return;
}
ret = gsc_irq_init(intf->irq);
if (ret < 0) {
drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
goto fail;
}
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
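/* compose a unique aux id from the PCI domain, bus and devfn */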
aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
PCI_DEVID(pdev->bus->number, pdev->devfn);
aux_dev->dev.parent = &pdev->dev;
aux_dev->dev.release = gsc_release_dev;
ret = auxiliary_device_init(aux_dev);
if (ret < 0) {
drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
kfree(adev);
goto fail;
}
ret = auxiliary_device_add(aux_dev);
if (ret < 0) {
drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
/* adev will be freed with the put_device() and .release sequence */
auxiliary_device_uninit(aux_dev);
goto fail;
}
intf->adev = adev;
return;
fail:
gsc_destroy_one(intf);
}
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
int ret;
if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
return;
}
if (!HAS_HECI_GSC(gt->i915)) {
drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
return;
}
if (gt->gsc.intf[intf_id].irq < 0) {
drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
return;
}
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
if (iir & GSC_IRQ_INTF(0))
gsc_irq_handler(gt, 0);
if (iir & GSC_IRQ_INTF(1))
gsc_irq_handler(gt, 1);
}
void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
unsigned int i;
if (!HAS_HECI_GSC(i915))
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
gsc_init_one(i915, &gsc->intf[i], i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
{
struct intel_gt *gt = gsc_to_gt(gsc);
unsigned int i;
if (!HAS_HECI_GSC(gt->i915))
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
gsc_destroy_one(&gsc->intf[i]);
}

View File

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
*/
#ifndef __INTEL_GSC_DEV_H__
#define __INTEL_GSC_DEV_H__
#include <linux/types.h>
struct drm_i915_private;
struct intel_gt;
struct mei_aux_device;
#define INTEL_GSC_NUM_INTERFACES 2
/*
* The HECI1 bit corresponds to bit15 and HECI2 to bit14.
* The reason for this is to allow growth for more interfaces in the future.
*/
#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
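/* For example: GSC_IRQ_INTF(0) == BIT(15) (HECI1), GSC_IRQ_INTF(1) == BIT(14) (HECI2). */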
/**
* struct intel_gsc - graphics security controller
* @intf : gsc interface
*/
struct intel_gsc {
struct intel_gsc_intf {
struct mei_aux_device *adev;
int irq;
unsigned int id;
} intf[INTEL_GSC_NUM_INTERFACES];
};
void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
void intel_gsc_fini(struct intel_gsc *gsc);
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
#endif /* __INTEL_GSC_DEV_H__ */

View File

@ -0,0 +1,522 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
/**
* DOC: GT Multicast/Replicated (MCR) Register Support
*
* Some GT registers are designed as "multicast" or "replicated" registers:
* multiple instances of the same register share a single MMIO offset. MCR
* registers are generally used when the hardware needs to potentially track
* independent values of a register per hardware unit (e.g., per-subslice,
* per-L3bank, etc.). The specific types of replication that exist vary
* per-platform.
*
* MMIO accesses to MCR registers are controlled according to the settings
* programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
* registers can be done in either multicast (i.e., a single write updates all
* instances of the register to the same value) or unicast (a write updates only
* one specific instance). Reads of MCR registers always operate in a unicast
* manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
* Selection of a specific MCR instance for unicast operations is referred to
* as "steering."
*
* If MCR register operations are steered toward a hardware unit that is
* fused off or currently powered down due to power gating, the MMIO operation
* is "terminated" by the hardware. Terminated read operations will return a
* value of zero and terminated unicast write operations will be silently
* ignored.
*/
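/*
* Illustrative usage (a sketch; HYPOTHETICAL_MCR_REG is not a real
* register): update every instance via multicast, then verify a single
* steered instance, remembering that reads steered to a fused-off or
* powered-down instance are terminated and return zero.
*
*	intel_gt_mcr_multicast_write(gt, HYPOTHETICAL_MCR_REG, val);
*	if (intel_gt_mcr_read(gt, HYPOTHETICAL_MCR_REG, group, instance) != val)
*		drm_warn(&gt->i915->drm, "steered readback mismatch\n");
*/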
#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
"MSLICE",
"LNCF",
"INSTANCE 0",
};
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
{ 0x00B100, 0x00B3FF },
{},
};
static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
{ 0x004000, 0x004AFF },
{ 0x00C800, 0x00CFFF },
{ 0x00DD00, 0x00DDFF },
{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
{},
};
static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D800, 0x00D8FF },
{},
};
static const struct intel_mmio_range dg2_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D880, 0x00D8FF },
{},
};
/*
* We have several types of MCR registers on PVC where steering to (0,0)
* will always provide us with a non-terminated value. We'll stick them
* all in the same table for simplicity.
*/
static const struct intel_mmio_range pvc_instance0_steering_table[] = {
{ 0x004000, 0x004AFF }, /* HALF-BSLICE */
{ 0x008800, 0x00887F }, /* CC */
{ 0x008A80, 0x008AFF }, /* TILEPSMI */
{ 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
{ 0x00B100, 0x00B3FF }, /* L3BANK */
{ 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
{ 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
{ 0x00DD00, 0x00DDFF }, /* BSLICE */
{ 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
{ 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
{ 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
{ 0x024180, 0x0241FF }, /* HALF-BSLICE */
{},
};
void intel_gt_mcr_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
/*
* An mslice is unavailable only if both the meml3 for the slice is
* disabled *and* all of the DSS in the slice (quadrant) are disabled.
*/
if (HAS_MSLICE_STEERING(i915)) {
gt->info.mslice_mask =
intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
GEN_DSS_PER_MSLICE);
gt->info.mslice_mask |=
(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN12_MEML3_EN_MASK);
if (!gt->info.mslice_mask) /* should be impossible! */
drm_warn(&i915->drm, "mslice mask all zero!\n");
}
if (IS_PONTEVECCHIO(i915)) {
gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
} else if (IS_DG2(i915)) {
gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
gt->steering_table[LNCF] = dg2_lncf_steering_table;
} else if (IS_XEHPSDV(i915)) {
gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
} else if (GRAPHICS_VER(i915) >= 11 &&
GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
if (!gt->info.l3bank_mask) /* should be impossible! */
drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
} else if (GRAPHICS_VER(i915) >= 11) {
/*
* We expect all modern platforms to have at least some
* type of steering that needs to be initialized.
*/
MISSING_CASE(INTEL_INFO(i915)->platform);
}
}
/*
* rw_with_mcr_steering_fw - Access a register with specific MCR steering
* @uncore: pointer to struct intel_uncore
* @reg: register being accessed
* @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
* @group: group number (documented as "sliceid" on older platforms)
* @instance: instance number (documented as "subsliceid" on older platforms)
* @value: register value to be written (ignored for read)
*
* Return: 0 for write access, register value for read access.
*
* Caller needs to make sure the relevant forcewake wells are up.
*/
static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
i915_reg_t reg, u8 rw_flag,
int group, int instance, u32 value)
{
u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
lockdep_assert_held(&uncore->lock);
if (GRAPHICS_VER(uncore->i915) >= 11) {
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
/*
* Wa_22013088509
*
* The setting of the multicast/unicast bit usually wouldn't
* matter for read operations (which always return the value
* from a single register instance regardless of how that bit
* is set), but some platforms have a workaround requiring us
* to remain in multicast mode for reads. There's no real
* downside to this, so we'll just go ahead and do so on all
* platforms; we'll only clear the multicast bit from the mask
* when explicitly doing a write operation.
*/
if (rw_flag == FW_REG_WRITE)
mcr_mask |= GEN11_MCR_MULTICAST;
} else {
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
}
old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
mcr &= ~mcr_mask;
mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
if (rw_flag == FW_REG_READ)
val = intel_uncore_read_fw(uncore, reg);
else
intel_uncore_write_fw(uncore, reg, value);
mcr &= ~mcr_mask;
mcr |= old_mcr & mcr_mask;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
return val;
}
static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
i915_reg_t reg, u8 rw_flag,
int group, int instance,
u32 value)
{
enum forcewake_domains fw_domains;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
rw_flag);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
return val;
}
/**
* intel_gt_mcr_read - read a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to read
* @group: the MCR group
* @instance: the MCR instance
*
* Returns the value read from an MCR register after steering toward a specific
* group/instance.
*/
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_reg_t reg,
int group, int instance)
{
return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
}
/**
* intel_gt_mcr_unicast_write - write a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
* @group: the MCR group
* @instance: the MCR instance
*
* Write an MCR register in unicast mode after steering toward a specific
* group/instance.
*/
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_reg_t reg, u32 value,
int group, int instance)
{
rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
}
/**
* intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances.
*/
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
i915_reg_t reg, u32 value)
{
intel_uncore_write(gt->uncore, reg, value);
}
/**
* intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances. This
* function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
* be obtained automatically.
*/
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_reg_t reg, u32 value)
{
intel_uncore_write_fw(gt->uncore, reg, value);
}
/*
* reg_needs_read_steering - determine whether a register read requires
* explicit steering
* @gt: GT structure
* @reg: the register to check steering requirements for
* @type: type of multicast steering to check
*
* Determines whether @reg needs explicit steering of a specific type for
* reads.
*
* Returns false if @reg does not belong to a register range of the given
* steering type, or if the default (subslice-based) steering IDs are suitable
* for @type steering too.
*/
static bool reg_needs_read_steering(struct intel_gt *gt,
i915_reg_t reg,
enum intel_steering_type type)
{
const u32 offset = i915_mmio_reg_offset(reg);
const struct intel_mmio_range *entry;
if (likely(!gt->steering_table[type]))
return false;
for (entry = gt->steering_table[type]; entry->end; entry++) {
if (offset >= entry->start && offset <= entry->end)
return true;
}
return false;
}
/*
* get_nonterminated_steering - determines valid IDs for a class of MCR steering
* @gt: GT structure
* @type: multicast register type
* @group: Group ID returned
* @instance: Instance ID returned
*
* Determines group and instance values that will steer reads of the specified
* MCR class to a non-terminated instance.
*/
static void get_nonterminated_steering(struct intel_gt *gt,
enum intel_steering_type type,
u8 *group, u8 *instance)
{
switch (type) {
case L3BANK:
*group = 0; /* unused */
*instance = __ffs(gt->info.l3bank_mask);
break;
case MSLICE:
GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
*group = __ffs(gt->info.mslice_mask);
*instance = 0; /* unused */
break;
case LNCF:
/*
* An LNCF is always present if its mslice is present, so we
* can safely just steer to LNCF 0 in all cases.
*/
GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
*group = __ffs(gt->info.mslice_mask) << 1;
*instance = 0; /* unused */
break;
case INSTANCE0:
/*
* There are a lot of MCR types for which instance (0, 0)
* will always provide a non-terminated value.
*/
*group = 0;
*instance = 0;
break;
default:
MISSING_CASE(type);
*group = 0;
*instance = 0;
}
}
/**
* intel_gt_mcr_get_nonterminated_steering - find group/instance values that
* will steer a register to a non-terminated instance
* @gt: GT structure
* @reg: register for which the steering is required
* @group: return variable for group steering
* @instance: return variable for instance steering
*
* This function returns a group/instance pair that is guaranteed to work for
* read steering of the given register. Note that a value will be returned even
* if the register is not replicated and therefore does not actually require
* steering.
*/
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
i915_reg_t reg,
u8 *group, u8 *instance)
{
int type;
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, group, instance);
return;
}
}
*group = gt->default_steering.groupid;
*instance = gt->default_steering.instanceid;
}
/**
* intel_gt_mcr_read_any_fw - reads one instance of an MCR register
* @gt: GT structure
* @reg: register to read
*
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
* This function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_read_any() in cases where forcewake should be
* obtained automatically.
*
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg)
{
int type;
u8 group, instance;
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
return rw_with_mcr_steering_fw(gt->uncore, reg,
FW_REG_READ,
group, instance, 0);
}
}
return intel_uncore_read_fw(gt->uncore, reg);
}
/**
* intel_gt_mcr_read_any - reads one instance of an MCR register
* @gt: GT structure
* @reg: register to read
*
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
*
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg)
{
int type;
u8 group, instance;
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
return rw_with_mcr_steering(gt->uncore, reg,
FW_REG_READ,
group, instance, 0);
}
}
return intel_uncore_read(gt->uncore, reg);
}
static void report_steering_type(struct drm_printer *p,
struct intel_gt *gt,
enum intel_steering_type type,
bool dump_table)
{
const struct intel_mmio_range *entry;
u8 group, instance;
BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
if (!gt->steering_table[type]) {
drm_printf(p, "%s steering: uses default steering\n",
intel_steering_types[type]);
return;
}
get_nonterminated_steering(gt, type, &group, &instance);
drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
intel_steering_types[type], group, instance);
if (!dump_table)
return;
for (entry = gt->steering_table[type]; entry->end; entry++)
drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
}
void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
bool dump_table)
{
drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
gt->default_steering.groupid,
gt->default_steering.instanceid);
if (IS_PONTEVECCHIO(gt->i915)) {
report_steering_type(p, gt, INSTANCE0, dump_table);
} else if (HAS_MSLICE_STEERING(gt->i915)) {
report_steering_type(p, gt, MSLICE, dump_table);
report_steering_type(p, gt, LNCF, dump_table);
}
}
/**
* intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
* @gt: GT structure
* @dss: DSS ID to obtain steering for
* @group: pointer to storage for steering group ID
* @instance: pointer to storage for steering instance ID
*
* Returns the steering IDs (via the @group and @instance parameters) that
* correspond to a specific subslice/DSS ID.
*/
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance)
{
if (IS_PONTEVECCHIO(gt->i915)) {
*group = dss / GEN_DSS_PER_CSLICE;
*instance = dss % GEN_DSS_PER_CSLICE;
} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
} else {
*group = dss / GEN_MAX_SS_PER_HSW_SLICE;
*instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
}
}

View File

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __INTEL_GT_MCR__
#define __INTEL_GT_MCR__
#include "intel_gt_types.h"
void intel_gt_mcr_init(struct intel_gt *gt);
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_reg_t reg,
int group, int instance);
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg);
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg);
void intel_gt_mcr_unicast_write(struct intel_gt *gt,
i915_reg_t reg, u32 value,
int group, int instance);
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
i915_reg_t reg, u32 value);
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
i915_reg_t reg, u32 value);
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
i915_reg_t reg,
u8 *group, u8 *instance);
void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
bool dump_table);
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance);
/*
* Helper for for_each_ss_steering loop. On pre-Xe_HP platforms, subslice
* presence is determined by using the group/instance as direct lookups in the
* slice/subslice topology. On Xe_HP and beyond, the steering is unrelated to
 * the topology, so we look up the DSS ID directly in "slice 0."
*/
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
/*
* Loop over each subslice/DSS and determine the group and instance IDs that
* should be used to steer MCR accesses toward this DSS.
*/
#define for_each_ss_steering(ss_, gt_, group_, instance_) \
for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
ss_ < I915_MAX_SS_FUSE_BITS; \
ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
#endif /* __INTEL_GT_MCR__ */
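
A short sketch of how the iterator above might be used; SOME_DSS_MCR_REG is a
hypothetical per-DSS multicast register. Each iteration yields a present DSS
together with the group/instance pair that steers MCR accesses to it:

	static void read_per_dss(struct intel_gt *gt)
	{
		unsigned int dss, group, instance;

		for_each_ss_steering(dss, gt, group, instance) {
			u32 val = intel_gt_mcr_read(gt, SOME_DSS_MCR_REG,
						    group, instance);

			/* consume val for this DSS ... */
		}
	}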

View File

@ -0,0 +1,117 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include <drm/drm_device.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_gt_types.h"
#include "intel_rc6.h"
bool is_object_gt(struct kobject *kobj)
{
return !strncmp(kobj->name, "gt", 2);
}
static struct intel_gt *kobj_to_gt(struct kobject *kobj)
{
return container_of(kobj, struct intel_gt, sysfs_gt);
}
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name)
{
struct kobject *kobj = &dev->kobj;
/*
 * We are interested in knowing whether the interface has been
 * called from the gt/ directory or from its parent directory,
 * because the type of the private data depends on that position:
 * if the interface is called from gt/, the private data is of
 * type "struct intel_gt *", otherwise it is of type
 * "struct drm_i915_private *".
*/
if (!is_object_gt(kobj)) {
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
return to_gt(i915);
}
return kobj_to_gt(kobj);
}
static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
{
return &gt->i915->drm.primary->kdev->kobj;
}
static ssize_t id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
return sysfs_emit(buf, "%u\n", gt->info.id);
}
static DEVICE_ATTR_RO(id);
static struct attribute *id_attrs[] = {
&dev_attr_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(id);
/* A kobject needs a release() method even if it does nothing */
static void kobj_gt_release(struct kobject *kobj)
{
}
static struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
};
void intel_gt_sysfs_register(struct intel_gt *gt)
{
/*
 * To preserve ABI compatibility, the files that were originally
 * generated under the parent directory are still created there,
 * but only for gt 0 to avoid duplicates.
*/
if (gt_is_root(gt))
intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
/* init and xfer ownership to sysfs tree */
if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
exit_fail:
kobject_put(&gt->sysfs_gt);
drm_warn(&gt->i915->drm,
"failed to initialize gt%d sysfs root\n", gt->info.id);
}
void intel_gt_sysfs_unregister(struct intel_gt *gt)
{
kobject_put(&gt->sysfs_gt);
}

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __SYSFS_GT_H__
#define __SYSFS_GT_H__
#include <linux/ctype.h>
#include <linux/kobject.h>
#include "i915_gem.h" /* GEM_BUG_ON() */
struct intel_gt;
bool is_object_gt(struct kobject *kobj);
struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
struct kobject *
intel_gt_create_kobj(struct intel_gt *gt,
struct kobject *dir,
const char *name);
void intel_gt_sysfs_register(struct intel_gt *gt);
void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name);
#endif /* SYSFS_GT_H */

View File

@ -0,0 +1,779 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include <drm/drm_device.h>
#include <linux/sysfs.h>
#include <linux/printk.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
enum intel_gt_sysfs_op {
INTEL_GT_SYSFS_MIN = 0,
INTEL_GT_SYSFS_MAX,
};
static int
sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
int (func)(struct intel_gt *gt, u32 val), u32 val)
{
struct intel_gt *gt;
int ret;
if (!is_object_gt(&dev->kobj)) {
int i;
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
for_each_gt(gt, i915, i) {
ret = func(gt, val);
if (ret)
break;
}
} else {
gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
ret = func(gt, val);
}
return ret;
}
static u32
sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
u32 (func)(struct intel_gt *gt),
enum intel_gt_sysfs_op op)
{
struct intel_gt *gt;
u32 ret;
ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
if (!is_object_gt(&dev->kobj)) {
int i;
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
for_each_gt(gt, i915, i) {
u32 val = func(gt);
switch (op) {
case INTEL_GT_SYSFS_MIN:
if (val < ret)
ret = val;
break;
case INTEL_GT_SYSFS_MAX:
if (val > ret)
ret = val;
break;
}
}
} else {
gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
ret = func(gt);
}
return ret;
}
/* RC6 interfaces will show the minimum RC6 residency value */
#define sysfs_gt_attribute_r_min_func(d, a, f) \
sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN)
/* Frequency interfaces will show the maximum frequency value */
#define sysfs_gt_attribute_r_max_func(d, a, f) \
sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
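/*
 * Example: reading rc6_residency_ms from the legacy parent directory reports
 * the minimum across all GTs, while max_freq_mhz reports the maximum; the
 * per-gt files under gt/gtN/ always report the value of that GT alone.
 */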
#ifdef CONFIG_PM
static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
{
intel_wakeref_t wakeref;
u64 res = 0;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
res = intel_rc6_residency_us(&gt->rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
static ssize_t rc6_enable_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
u8 mask = 0;
if (HAS_RC6(gt->i915))
mask |= BIT(0);
if (HAS_RC6p(gt->i915))
mask |= BIT(1);
if (HAS_RC6pp(gt->i915))
mask |= BIT(2);
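	/* e.g. a platform with RC6 and RC6p but no RC6pp reports 0x3 */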
return sysfs_emit(buff, "%x\n", mask);
}
static u32 __rc6_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, GEN6_GT_GFX_RC6);
}
static ssize_t rc6_residency_ms_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
__rc6_residency_ms_show);
return sysfs_emit(buff, "%u\n", rc6_residency);
}
static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, GEN6_GT_GFX_RC6p);
}
static ssize_t rc6p_residency_ms_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
u32 rc6p_residency = sysfs_gt_attribute_r_min_func(dev, attr,
__rc6p_residency_ms_show);
return sysfs_emit(buff, "%u\n", rc6p_residency);
}
static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, GEN6_GT_GFX_RC6pp);
}
static ssize_t rc6pp_residency_ms_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
u32 rc6pp_residency = sysfs_gt_attribute_r_min_func(dev, attr,
__rc6pp_residency_ms_show);
return sysfs_emit(buff, "%u\n", rc6pp_residency);
}
static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, VLV_GT_MEDIA_RC6);
}
static ssize_t media_rc6_residency_ms_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
__media_rc6_residency_ms_show);
return sysfs_emit(buff, "%u\n", rc6_residency);
}
static DEVICE_ATTR_RO(rc6_enable);
static DEVICE_ATTR_RO(rc6_residency_ms);
static DEVICE_ATTR_RO(rc6p_residency_ms);
static DEVICE_ATTR_RO(rc6pp_residency_ms);
static DEVICE_ATTR_RO(media_rc6_residency_ms);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
NULL
};
static struct attribute *rc6p_attrs[] = {
&dev_attr_rc6p_residency_ms.attr,
&dev_attr_rc6pp_residency_ms.attr,
NULL
};
static struct attribute *media_rc6_attrs[] = {
&dev_attr_media_rc6_residency_ms.attr,
NULL
};
static const struct attribute_group rc6_attr_group[] = {
{ .attrs = rc6_attrs, },
{ .name = power_group_name, .attrs = rc6_attrs, },
};
static const struct attribute_group rc6p_attr_group[] = {
{ .attrs = rc6p_attrs, },
{ .name = power_group_name, .attrs = rc6p_attrs, },
};
static const struct attribute_group media_rc6_attr_group[] = {
{ .attrs = media_rc6_attrs, },
{ .name = power_group_name, .attrs = media_rc6_attrs, },
};
static int __intel_gt_sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return is_object_gt(kobj) ?
sysfs_create_group(kobj, &grp[0]) :
sysfs_merge_group(kobj, &grp[1]);
}
static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
if (!HAS_RC6(gt->i915))
return;
ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create gt%u RC6 sysfs files (%pe)\n",
gt->info.id, ERR_PTR(ret));
/*
 * We cannot use the is_visible() attribute because
 * the upper object inherits from the parent group.
*/
if (HAS_RC6p(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create gt%u RC6p sysfs files (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create media %u RC6 sysfs files (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
}
#else
static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
}
#endif /* CONFIG_PM */
static u32 __act_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_read_actual_frequency(&gt->rps);
}
static ssize_t act_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 actual_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__act_freq_mhz_show);
return sysfs_emit(buff, "%u\n", actual_freq);
}
static u32 __cur_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_requested_frequency(&gt->rps);
}
static ssize_t cur_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 cur_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__cur_freq_mhz_show);
return sysfs_emit(buff, "%u\n", cur_freq);
}
static u32 __boost_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_boost_frequency(&gt->rps);
}
static ssize_t boost_freq_mhz_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
u32 boost_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__boost_freq_mhz_show);
return sysfs_emit(buff, "%u\n", boost_freq);
}
static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
{
return intel_rps_set_boost_frequency(&gt->rps, val);
}
static ssize_t boost_freq_mhz_store(struct device *dev,
struct device_attribute *attr,
const char *buff, size_t count)
{
ssize_t ret;
u32 val;
ret = kstrtou32(buff, 0, &val);
if (ret)
return ret;
return sysfs_gt_attribute_w_func(dev, attr,
__boost_freq_mhz_store, val) ?: count;
}
static u32 __rp0_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rp0_frequency(&gt->rps);
}
static ssize_t RP0_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 rp0_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__rp0_freq_mhz_show);
return sysfs_emit(buff, "%u\n", rp0_freq);
}
static u32 __rp1_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rp1_frequency(&gt->rps);
}
static ssize_t RP1_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 rp1_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__rp1_freq_mhz_show);
return sysfs_emit(buff, "%u\n", rp1_freq);
}
static u32 __rpn_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rpn_frequency(&gt->rps);
}
static ssize_t RPn_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 rpn_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__rpn_freq_mhz_show);
return sysfs_emit(buff, "%u\n", rpn_freq);
}
static u32 __max_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_max_frequency(&gt->rps);
}
static ssize_t max_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 max_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__max_freq_mhz_show);
return sysfs_emit(buff, "%u\n", max_freq);
}
static int __set_max_freq(struct intel_gt *gt, u32 val)
{
return intel_rps_set_max_frequency(&gt->rps, val);
}
static ssize_t max_freq_mhz_store(struct device *dev,
struct device_attribute *attr,
const char *buff, size_t count)
{
int ret;
u32 val;
ret = kstrtou32(buff, 0, &val);
if (ret)
return ret;
ret = sysfs_gt_attribute_w_func(dev, attr, __set_max_freq, val);
return ret ?: count;
}
static u32 __min_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_min_frequency(&gt->rps);
}
static ssize_t min_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 min_freq = sysfs_gt_attribute_r_min_func(dev, attr,
__min_freq_mhz_show);
return sysfs_emit(buff, "%u\n", min_freq);
}
static int __set_min_freq(struct intel_gt *gt, u32 val)
{
return intel_rps_set_min_frequency(&gt->rps, val);
}
static ssize_t min_freq_mhz_store(struct device *dev,
struct device_attribute *attr,
const char *buff, size_t count)
{
int ret;
u32 val;
ret = kstrtou32(buff, 0, &val);
if (ret)
return ret;
ret = sysfs_gt_attribute_w_func(dev, attr, __set_min_freq, val);
return ret ?: count;
}
static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
{
struct intel_rps *rps = &gt->rps;
return intel_gpu_freq(rps, rps->efficient_freq);
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
u32 rpe_freq = sysfs_gt_attribute_r_max_func(dev, attr,
__vlv_rpe_freq_mhz_show);
return sysfs_emit(buff, "%u\n", rpe_freq);
}
#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store) \
static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, _show, _store); \
static struct device_attribute dev_attr_rps_##_name = __ATTR(rps_##_name, _mode, _show, _store)
#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \
INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL)
#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \
INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store)
/* The below macros generate static structures */
INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
#define GEN6_ATTR(s) { \
&dev_attr_##s##_act_freq_mhz.attr, \
&dev_attr_##s##_cur_freq_mhz.attr, \
&dev_attr_##s##_boost_freq_mhz.attr, \
&dev_attr_##s##_max_freq_mhz.attr, \
&dev_attr_##s##_min_freq_mhz.attr, \
&dev_attr_##s##_RP0_freq_mhz.attr, \
&dev_attr_##s##_RP1_freq_mhz.attr, \
&dev_attr_##s##_RPn_freq_mhz.attr, \
NULL, \
}
#define GEN6_RPS_ATTR GEN6_ATTR(rps)
#define GEN6_GT_ATTR GEN6_ATTR(gt)
static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR;
static ssize_t punit_req_freq_mhz_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
return sysfs_emit(buff, "%u\n", preq);
}
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
char *buf);
i915_reg_t reg32;
u32 mask;
};
static ssize_t throttle_reason_bool_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
struct intel_gt_bool_throttle_attr *t_attr =
(struct intel_gt_bool_throttle_attr *) attr;
bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask);
return sysfs_emit(buff, "%u\n", val);
}
#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \
struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
.attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \
.show = throttle_reason_bool_show, \
.reg32 = GT0_PERF_LIMIT_REASONS, \
.mask = mask__, \
}
static DEVICE_ATTR_RO(punit_req_freq_mhz);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
static const struct attribute *freq_attrs[] = {
&dev_attr_punit_req_freq_mhz.attr,
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
&attr_throttle_reason_pl4.attr,
&attr_throttle_reason_thermal.attr,
&attr_throttle_reason_prochot.attr,
&attr_throttle_reason_ratl.attr,
&attr_throttle_reason_vr_thermalert.attr,
&attr_throttle_reason_vr_tdc.attr,
NULL
};
/*
* Scaling for multipliers (aka frequency factors).
* The format of the value in the register is u8.8.
*
* The presentation to userspace is inspired by the perf event framework.
* See:
* Documentation/ABI/testing/sysfs-bus-event_source-devices-events
* for description of:
* /sys/bus/event_source/devices/<pmu>/events/<event>.scale
*
* Summary: Expose two sysfs files for each multiplier.
*
* 1. File <attr> contains a raw hardware value.
* 2. File <attr>.scale contains the multiplicative scale factor to be
* used by userspace to compute the actual value.
*
* So userspace knows that to get the frequency_factor it multiplies the
* provided value by the specified scale factor and vice-versa.
*
* That way there is no precision loss in the kernel interface and API
* is future proof should one day the hardware register change to u16.u16,
* on some platform. (Or any other fixed point representation.)
*
* Example:
* File <attr> contains the value 2.5, represented as u8.8 0x0280, which
* is comprised of:
* - an integer part of 2
* - a fractional part of 0x80 (representing 0x80 / 2^8 == 0x80 / 256).
* File <attr>.scale contains a string representation of floating point
* value 0.00390625 (which is (1 / 256)).
* Userspace computes the actual value:
* 0x0280 * 0.00390625 -> 2.5
* or converts an actual value to the value to be written into <attr>:
* 2.5 / 0.00390625 -> 0x0280
*/
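/*
 * Illustrative userspace-side math (not part of the driver), given "val"
 * read from <attr> and "scale" parsed from <attr>.scale:
 *
 *	double factor = val * scale;	// 0x0280 * 0.00390625 == 2.5
 *	u16 raw = factor / scale;	// back to the u8.8 value 0x0280
 */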
#define U8_8_VAL_MASK 0xffff
#define U8_8_SCALE_TO_VALUE "0.00390625"
static ssize_t freq_factor_scale_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
}
static u32 media_ratio_mode_to_factor(u32 mode)
{
/* 0 -> 0, 1 -> 256, 2 -> 128 */
return !mode ? mode : 256 / mode;
}
static ssize_t media_freq_factor_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
intel_wakeref_t wakeref;
u32 mode;
/*
* Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
* GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
*/
if (IS_XEHPSDV(gt->i915) &&
slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
/*
* For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
* the media_ratio_mode, just return the cached media ratio
*/
mode = slpc->media_ratio_mode;
} else {
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
}
return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
}
static ssize_t media_freq_factor_store(struct device *dev,
struct device_attribute *attr,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
u32 factor, mode;
int err;
err = kstrtou32(buff, 0, &factor);
if (err)
return err;
for (mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
mode <= SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; mode++)
if (factor == media_ratio_mode_to_factor(mode))
break;
if (mode > SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO)
return -EINVAL;
err = intel_guc_slpc_set_media_ratio_mode(slpc, mode);
if (!err) {
slpc->media_ratio_mode = mode;
DRM_DEBUG("Set slpc->media_ratio_mode to %d", mode);
}
return err ?: count;
}
static ssize_t media_RP0_freq_mhz_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
u32 val;
int err;
err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
PCODE_MBOX_FC_SC_READ_FUSED_P0,
PCODE_MBOX_DOMAIN_MEDIAFF, &val);
if (err)
return err;
/* Fused media RP0 read from pcode is in units of 50 MHz */
val *= GT_FREQUENCY_MULTIPLIER;
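	/* e.g. a fused value of 24 yields 24 * 50 == 1200 MHz */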
return sysfs_emit(buff, "%u\n", val);
}
static ssize_t media_RPn_freq_mhz_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
u32 val;
int err;
err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
PCODE_MBOX_FC_SC_READ_FUSED_PN,
PCODE_MBOX_DOMAIN_MEDIAFF, &val);
if (err)
return err;
/* Fused media RPn read from pcode is in units of 50 MHz */
val *= GT_FREQUENCY_MULTIPLIER;
return sysfs_emit(buff, "%u\n", val);
}
static DEVICE_ATTR_RW(media_freq_factor);
static struct device_attribute dev_attr_media_freq_factor_scale =
__ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
static DEVICE_ATTR_RO(media_RP0_freq_mhz);
static DEVICE_ATTR_RO(media_RPn_freq_mhz);
static const struct attribute *media_perf_power_attrs[] = {
&dev_attr_media_freq_factor.attr,
&dev_attr_media_freq_factor_scale.attr,
&dev_attr_media_RP0_freq_mhz.attr,
&dev_attr_media_RPn_freq_mhz.attr,
NULL
};
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
int ret;
if (GRAPHICS_VER(gt->i915) < 6)
return 0;
ret = sysfs_create_files(kobj, attrs);
if (ret)
return ret;
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
ret = sysfs_create_file(kobj, &dev_attr_vlv_rpe_freq_mhz.attr);
return ret;
}
void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
intel_sysfs_rc6_init(gt, kobj);
ret = is_object_gt(kobj) ?
intel_sysfs_rps_init(gt, kobj, gen6_rps_attrs) :
intel_sysfs_rps_init(gt, kobj, gen6_gt_attrs);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create gt%u RPS sysfs files (%pe)",
gt->info.id, ERR_PTR(ret));
/* end of the legacy interfaces */
if (!is_object_gt(kobj))
return;
ret = sysfs_create_files(kobj, freq_attrs);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create gt%u throttle sysfs files (%pe)",
gt->info.id, ERR_PTR(ret));
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
drm_warn(&gt->i915->drm,
"failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __SYSFS_GT_PM_H__
#define __SYSFS_GT_PM_H__
#include <linux/kobject.h>
#include "intel_gt_types.h"
void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj);
#endif /* __SYSFS_GT_PM_H__ */

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _INTEL_HWCONFIG_H_
#define _INTEL_HWCONFIG_H_
#include <linux/types.h>
struct intel_gt;
struct intel_hwconfig {
u32 size;
void *ptr;
};
int intel_gt_init_hwconfig(struct intel_gt *gt);
void intel_gt_fini_hwconfig(struct intel_gt *gt);
#endif /* _INTEL_HWCONFIG_H_ */

View File

@ -0,0 +1,218 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021-2022 Intel Corporation
*/
#ifndef _INTEL_GUC_CAPTURE_FWIF_H
#define _INTEL_GUC_CAPTURE_FWIF_H
#include <linux/types.h>
#include "intel_guc_fwif.h"
struct intel_guc;
struct file;
/**
* struct __guc_capture_bufstate
*
* Book-keeping structure used to track read and write pointers
* as we extract error capture data from the GuC-log-buffer's
* error-capture region as a stream of dwords.
*/
struct __guc_capture_bufstate {
u32 size;
void *data;
u32 rd;
u32 wr;
};
/**
* struct __guc_capture_parsed_output - extracted error capture node
*
* A single unit of extracted error-capture output data grouped together
* at an engine-instance level. We keep these nodes in a linked list.
* See cachelist and outlist below.
*/
struct __guc_capture_parsed_output {
/*
 * A single set of 3 capture lists: a global list,
 * an engine-class list and an engine-instance list.
 * outlist in struct intel_guc_state_capture keeps a
 * linked list of these nodes that are eventually
 * detached from outlist and attached to
 * i915_gpu_coredump in response to a context reset.
*/
struct list_head link;
bool is_partial;
u32 eng_class;
u32 eng_inst;
u32 guc_id;
u32 lrca;
struct gcap_reg_list_info {
u32 vfid;
u32 num_regs;
struct guc_mmio_reg *regs;
} reginfo[GUC_CAPTURE_LIST_TYPE_MAX];
#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_CAPTURE_LIST_TYPE_GLOBAL)
#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS)
#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE)
};
/**
* struct guc_debug_capture_list_header / struct guc_debug_capture_list
*
* As part of ADS registration, these header structures (followed by
* an array of 'struct guc_mmio_reg' entries) are used to register with
* GuC microkernel the list of registers we want it to dump out prior
 * to an engine reset.
*/
struct guc_debug_capture_list_header {
u32 info;
#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
} __packed;
struct guc_debug_capture_list {
struct guc_debug_capture_list_header header;
	struct guc_mmio_reg regs[];
} __packed;
/**
* struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group
*
* intel_guc_capture module uses these structures to maintain static
 * tables (per unique platform) that consist of lists of registers
 * (offsets, names, flags, ...) that are used at ADS registration
 * time as well as during runtime processing and reporting of the
 * error-capture states generated by GuC just prior to engine reset
 * events.
*/
struct __guc_mmio_reg_descr {
i915_reg_t reg;
u32 flags;
u32 mask;
const char *regname;
};
struct __guc_mmio_reg_descr_group {
const struct __guc_mmio_reg_descr *list;
u32 num_regs;
u32 owner; /* see enum guc_capture_owner */
u32 type; /* see enum guc_capture_type */
u32 engine; /* as per MAX_ENGINE_CLASS */
struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
};
/**
* struct guc_state_capture_header_t / struct guc_state_capture_t /
* guc_state_capture_group_header_t / guc_state_capture_group_t
*
 * Prior to resetting engines that have hung or faulted, the GuC microkernel
 * reports the engine error-state (register values that were read) by
 * logging them into the shared GuC log buffer using this hierarchy
* of structures.
*/
struct guc_state_capture_header_t {
u32 owner;
#define CAP_HDR_CAPTURE_VFID GENMASK(7, 0)
u32 info;
#define CAP_HDR_CAPTURE_TYPE GENMASK(3, 0) /* see enum guc_capture_type */
#define CAP_HDR_ENGINE_CLASS GENMASK(7, 4) /* see GUC_MAX_ENGINE_CLASSES */
#define CAP_HDR_ENGINE_INSTANCE GENMASK(11, 8)
u32 lrca; /* if type-instance, LRCA (address) that hung, else set to ~0 */
u32 guc_id; /* if type-instance, context index of hung context, else set to ~0 */
u32 num_mmios;
#define CAP_HDR_NUM_MMIOS GENMASK(9, 0)
} __packed;
struct guc_state_capture_t {
struct guc_state_capture_header_t header;
	struct guc_mmio_reg mmio_entries[];
} __packed;
enum guc_capture_group_types {
GUC_STATE_CAPTURE_GROUP_TYPE_FULL,
GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL,
GUC_STATE_CAPTURE_GROUP_TYPE_MAX,
};
struct guc_state_capture_group_header_t {
u32 owner;
#define CAP_GRP_HDR_CAPTURE_VFID GENMASK(7, 0)
u32 info;
#define CAP_GRP_HDR_NUM_CAPTURES GENMASK(7, 0)
#define CAP_GRP_HDR_CAPTURE_TYPE GENMASK(15, 8) /* guc_capture_group_types */
} __packed;
/* this is the top level structure where an error-capture dump starts */
struct guc_state_capture_group_t {
struct guc_state_capture_group_header_t grp_header;
	struct guc_state_capture_t capture_entries[];
} __packed;
/**
* struct __guc_capture_ads_cache
*
* A structure to cache register lists that were populated and registered
* with GuC at startup during ADS registration. This allows much quicker
* GuC resets without re-parsing all the tables for the given gt.
*/
struct __guc_capture_ads_cache {
bool is_valid;
void *ptr;
size_t size;
int status;
};
/**
* struct intel_guc_state_capture
*
* Internal context of the intel_guc_capture module.
*/
struct intel_guc_state_capture {
/**
* @reglists: static table of register lists used for error-capture state.
*/
const struct __guc_mmio_reg_descr_group *reglists;
/**
* @extlists: allocated table of steered register lists used for error-capture state.
*
 * NOTE: steered registers have multiple instances depending on the HW
 * configuration (slices or dual-sub-slices) and thus depend on the HW
 * fuses discovered at startup.
*/
struct __guc_mmio_reg_descr_group *extlists;
/**
 * @ads_cache: cached register lists that are ADS-format ready
*/
struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
[GUC_CAPTURE_LIST_TYPE_MAX]
[GUC_MAX_ENGINE_CLASSES];
void *ads_null_cache;
/**
* @cachelist: Pool of pre-allocated nodes for error capture output
*
* We need this pool of pre-allocated nodes because we cannot
 * dynamically allocate new nodes when receiving the G2H notification:
 * the event handlers for all G2H event-processing are called
 * from the CT processing worker queue, and while that queue is being
 * processed there is no absolute guarantee that we are not in the
* midst of a GT reset operation (which doesn't allow allocations).
*/
struct list_head cachelist;
#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
#define PREALLOC_NODES_DEFAULT_NUMREGS 64
int max_mmio_per_node;
/**
 * @outlist: List of parsed error-capture output nodes
 *
 * A linked list of parsed GuC error-capture output data before
 * reporting with formatting via i915_gpu_coredump. Each node in this
 * list shall contain a single engine-capture including global,
 * engine-class and engine-instance register dumps, as per
 * struct __guc_capture_parsed_output.
*/
struct list_head outlist;
};
#endif /* _INTEL_GUC_CAPTURE_FWIF_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021-2022 Intel Corporation
*/
#ifndef _INTEL_GUC_CAPTURE_H
#define _INTEL_GUC_CAPTURE_H
#include <linux/types.h>
struct drm_i915_error_state_buf;
struct guc_gt_system_info;
struct intel_engine_coredump;
struct intel_context;
struct intel_gt;
struct intel_guc;
void intel_guc_capture_free_node(struct intel_engine_coredump *ee);
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee);
void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
struct intel_context *ce);
void intel_guc_capture_process(struct intel_guc *guc);
int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr);
int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size);
int intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size);
void intel_guc_capture_destroy(struct intel_guc *guc);
int intel_guc_capture_init(struct intel_guc *guc);
#endif /* _INTEL_GUC_CAPTURE_H */

View File

@ -0,0 +1,164 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "gt/intel_gt.h"
#include "gt/intel_hwconfig.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
/*
* GuC has a blob containing hardware configuration information (HWConfig).
* This is formatted as a simple and flexible KLV (Key/Length/Value) table.
*
* For example, a minimal version could be:
* enum device_attr {
* ATTR_SOME_VALUE = 0,
* ATTR_SOME_MASK = 1,
* };
*
* static const u32 hwconfig[] = {
* ATTR_SOME_VALUE,
* 1, // Value Length in DWords
* 8, // Value
*
* ATTR_SOME_MASK,
* 3,
* 0x00FFFFFFFF, 0xFFFFFFFF, 0xFF000000,
* };
*
* The attribute ids are defined in a hardware spec.
*/
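/*
 * A minimal sketch (illustrative, not used by the driver) of walking the
 * blob once it has been copied into hwconfig->ptr; handle_attr() is a
 * hypothetical consumer callback:
 *
 *	static void hwconfig_for_each_klv(const struct intel_hwconfig *hwconfig,
 *					  void (*handle_attr)(u32 key,
 *							      const u32 *val,
 *							      u32 len))
 *	{
 *		const u32 *pos = hwconfig->ptr;
 *		const u32 *end = pos + hwconfig->size / sizeof(u32);
 *
 *		while (pos + 2 <= end) {
 *			u32 key = pos[0];
 *			u32 len = pos[1];	// value length in dwords
 *
 *			if (pos + 2 + len > end)
 *				break;		// truncated entry
 *			handle_attr(key, pos + 2, len);
 *			pos += 2 + len;
 *		}
 *	}
 */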
static int __guc_action_get_hwconfig(struct intel_guc *guc,
u32 ggtt_offset, u32 ggtt_size)
{
u32 action[] = {
INTEL_GUC_ACTION_GET_HWCONFIG,
lower_32_bits(ggtt_offset),
upper_32_bits(ggtt_offset),
ggtt_size,
};
int ret;
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret == -ENXIO)
return -ENOENT;
return ret;
}
static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
{
int ret;
/*
* Sending a query with zero offset and size will return the
* size of the blob.
*/
ret = __guc_action_get_hwconfig(guc, 0, 0);
if (ret < 0)
return ret;
if (ret == 0)
return -EINVAL;
hwconfig->size = ret;
return 0;
}
static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
{
struct i915_vma *vma;
u32 ggtt_offset;
void *vaddr;
int ret;
GEM_BUG_ON(!hwconfig->size);
ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
if (ret)
return ret;
ggtt_offset = intel_guc_ggtt_offset(guc, vma);
ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size);
if (ret >= 0)
memcpy(hwconfig->ptr, vaddr, hwconfig->size);
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return ret;
}
static bool has_table(struct drm_i915_private *i915)
{
if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
return true;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
return false;
}
/**
 * guc_hwconfig_init - Initialize the HWConfig
*
* Retrieve the HWConfig table from the GuC and save it locally.
* It can then be queried on demand by other users later on.
*/
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
struct intel_guc *guc = &gt->uc.guc;
int ret;
if (!has_table(gt->i915))
return 0;
ret = guc_hwconfig_discover_size(guc, hwconfig);
if (ret)
return ret;
hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL);
if (!hwconfig->ptr) {
hwconfig->size = 0;
return -ENOMEM;
}
ret = guc_hwconfig_fill_buffer(guc, hwconfig);
if (ret < 0) {
intel_gt_fini_hwconfig(gt);
return ret;
}
return 0;
}
/**
* intel_gt_init_hwconfig - Initialize the HWConfig if available
*
* Retrieve the HWConfig table if available on the current platform.
*/
int intel_gt_init_hwconfig(struct intel_gt *gt)
{
if (!intel_uc_uses_guc(&gt->uc))
return 0;
return guc_hwconfig_init(gt);
}
/**
* intel_gt_fini_hwconfig - Finalize the HWConfig
*
* Free up the memory allocation holding the table.
*/
void intel_gt_fini_hwconfig(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
kfree(hwconfig->ptr);
hwconfig->size = 0;
hwconfig->ptr = NULL;
}

View File

@ -0,0 +1,160 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/drm/i915_drm.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
#include "i915_drm_client.h"
#include "i915_file_private.h"
#include "i915_gem.h"
#include "i915_utils.h"
void i915_drm_clients_init(struct i915_drm_clients *clients,
struct drm_i915_private *i915)
{
clients->i915 = i915;
clients->next_id = 0;
xa_init_flags(&clients->xarray, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
}
struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients)
{
struct i915_drm_client *client;
struct xarray *xa = &clients->xarray;
int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
xa_lock_irq(xa);
ret = __xa_alloc_cyclic(xa, &client->id, client, xa_limit_32b,
&clients->next_id, GFP_KERNEL);
xa_unlock_irq(xa);
if (ret < 0)
goto err;
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
client->clients = clients;
return client;
err:
kfree(client);
return ERR_PTR(ret);
}
void __i915_drm_client_free(struct kref *kref)
{
struct i915_drm_client *client =
container_of(kref, typeof(*client), kref);
struct xarray *xa = &client->clients->xarray;
unsigned long flags;
xa_lock_irqsave(xa, flags);
__xa_erase(xa, client->id);
xa_unlock_irqrestore(xa, flags);
kfree(client);
}
void i915_drm_clients_fini(struct i915_drm_clients *clients)
{
GEM_BUG_ON(!xa_empty(&clients->xarray));
xa_destroy(&clients->xarray);
}
#ifdef CONFIG_PROC_FS
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
[I915_ENGINE_CLASS_VIDEO] = "video",
[I915_ENGINE_CLASS_VIDEO_ENHANCE] = "video-enhance",
[I915_ENGINE_CLASS_COMPUTE] = "compute",
};
static u64 busy_add(struct i915_gem_context *ctx, unsigned int class)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
u64 total = 0;
for_each_gem_engine(ce, rcu_dereference(ctx->engines), it) {
if (ce->engine->uabi_class != class)
continue;
total += intel_context_get_total_runtime_ns(ce);
}
return total;
}
static void
show_client_class(struct seq_file *m,
struct i915_drm_client *client,
unsigned int class)
{
const struct list_head *list = &client->ctx_list;
u64 total = atomic64_read(&client->past_runtime[class]);
const unsigned int capacity =
client->clients->i915->engine_uabi_class_count[class];
struct i915_gem_context *ctx;
rcu_read_lock();
list_for_each_entry_rcu(ctx, list, client_link)
total += busy_add(ctx, class);
rcu_read_unlock();
if (capacity)
seq_printf(m, "drm-engine-%s:\t%llu ns\n",
uabi_class_names[class], total);
if (capacity > 1)
seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
uabi_class_names[class],
capacity);
}
void i915_drm_client_fdinfo(struct seq_file *m, struct file *f)
{
struct drm_file *file = f->private_data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
struct i915_drm_client *client = file_priv->client;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int i;
/*
* ******************************************************************
* For text output format description please see drm-usage-stats.rst!
* ******************************************************************
*/
seq_printf(m, "drm-driver:\t%s\n", i915->drm.driver->name);
seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n",
pci_domain_nr(pdev->bus), pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
seq_printf(m, "drm-client-id:\t%u\n", client->id);
/*
* Temporarily skip showing client engine information with GuC submission till
* fetching engine busyness is implemented in the GuC submission backend
*/
if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc))
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(m, client, i);
}
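/*
 * Hypothetical output produced by the above (all values invented; a
 * compute-capable platform would also list drm-engine-compute):
 *
 *	drm-driver:	i915
 *	drm-pdev:	0000:00:02.0
 *	drm-client-id:	4
 *	drm-engine-render:	12345678 ns
 *	drm-engine-copy:	0 ns
 *	drm-engine-video:	0 ns
 *	drm-engine-capacity-video:	2
 *	drm-engine-video-enhance:	0 ns
 */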
#endif

View File

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/
#ifndef __I915_DRM_CLIENT_H__
#define __I915_DRM_CLIENT_H__
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include <uapi/drm/i915_drm.h>
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_i915_private;
struct i915_drm_clients {
struct drm_i915_private *i915;
struct xarray xarray;
u32 next_id;
};
struct i915_drm_client {
struct kref kref;
unsigned int id;
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
struct i915_drm_clients *clients;
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1];
};
void i915_drm_clients_init(struct i915_drm_clients *clients,
struct drm_i915_private *i915);
static inline struct i915_drm_client *
i915_drm_client_get(struct i915_drm_client *client)
{
kref_get(&client->kref);
return client;
}
void __i915_drm_client_free(struct kref *kref);
static inline void i915_drm_client_put(struct i915_drm_client *client)
{
kref_put(&client->kref, __i915_drm_client_free);
}
struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients);
#ifdef CONFIG_PROC_FS
void i915_drm_client_fdinfo(struct seq_file *m, struct file *f);
#endif
void i915_drm_clients_fini(struct i915_drm_clients *clients);
#endif /* !__I915_DRM_CLIENT_H__ */

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef __I915_TASKLET_H__
#define __I915_TASKLET_H__
#include <linux/interrupt.h>
static inline void tasklet_lock(struct tasklet_struct *t)
{
while (!tasklet_trylock(t))
cpu_relax();
}
static inline bool tasklet_is_locked(const struct tasklet_struct *t)
{
return test_bit(TASKLET_STATE_RUN, &t->state);
}
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
tasklet_unlock_spin_wait(t);
}
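/*
 * Note on __tasklet_disable_sync_once() above: only the first disable
 * (count 0 -> 1) spin-waits for a running tasklet to finish; subsequent
 * nested calls merely increment the disable count.
 */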
static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
{
return !atomic_read(&t->count);
}
static inline bool __tasklet_enable(struct tasklet_struct *t)
{
return atomic_dec_and_test(&t->count);
}
static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
{
return test_bit(TASKLET_STATE_SCHED, &t->state);
}
#endif /* __I915_TASKLET_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,103 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
* Copyright (C) 2019, 2020 Paul Boddie <paul@boddie.org.uk>
*
* Derived from dw_hdmi-imx.c with i.MX portions removed.
*/
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
static const struct dw_hdmi_mpll_config ingenic_mpll_cfg[] = {
{ 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } } },
{ 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 } } },
{ 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a } } },
{ 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f } } },
{ ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } }
};
static const struct dw_hdmi_curr_ctrl ingenic_cur_ctr[] = {
/*pixelclk bpp8 bpp10 bpp12 */
{ 54000000, { 0x091c, 0x091c, 0x06dc } },
{ 58400000, { 0x091c, 0x06dc, 0x06dc } },
{ 72000000, { 0x06dc, 0x06dc, 0x091c } },
{ 74250000, { 0x06dc, 0x0b5c, 0x091c } },
{ 118800000, { 0x091c, 0x091c, 0x06dc } },
{ 216000000, { 0x06dc, 0x0b5c, 0x091c } },
{ ~0UL, { 0x0000, 0x0000, 0x0000 } },
};
/*
* Resistance term 133Ohm Cfg
* PREEMP config 0.00
* TX/CK level 10
*/
static const struct dw_hdmi_phy_config ingenic_phy_config[] = {
/*pixelclk symbol term vlev */
{ 216000000, 0x800d, 0x0005, 0x01ad},
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
static enum drm_mode_status
ingenic_dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
if (mode->clock < 13500)
return MODE_CLOCK_LOW;
/* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
if (mode->clock > 216000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static struct dw_hdmi_plat_data ingenic_dw_hdmi_plat_data = {
.mpll_cfg = ingenic_mpll_cfg,
.cur_ctr = ingenic_cur_ctr,
.phy_config = ingenic_phy_config,
.mode_valid = ingenic_dw_hdmi_mode_valid,
.output_port = 1,
};
static const struct of_device_id ingenic_dw_hdmi_dt_ids[] = {
{ .compatible = "ingenic,jz4780-dw-hdmi" },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ingenic_dw_hdmi_dt_ids);
static void ingenic_dw_hdmi_cleanup(void *data)
{
struct dw_hdmi *hdmi = (struct dw_hdmi *)data;
dw_hdmi_remove(hdmi);
}
static int ingenic_dw_hdmi_probe(struct platform_device *pdev)
{
struct dw_hdmi *hdmi;
hdmi = dw_hdmi_probe(pdev, &ingenic_dw_hdmi_plat_data);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
return devm_add_action_or_reset(&pdev->dev, ingenic_dw_hdmi_cleanup, hdmi);
}
static struct platform_driver ingenic_dw_hdmi_driver = {
.probe = ingenic_dw_hdmi_probe,
.driver = {
.name = "dw-hdmi-ingenic",
.of_match_table = ingenic_dw_hdmi_dt_ids,
},
};
module_platform_driver(ingenic_dw_hdmi_driver);
MODULE_DESCRIPTION("JZ4780 Specific DW-HDMI Driver Extension");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dw-hdmi-ingenic");

View File

@ -0,0 +1,9 @@
config DRM_LOGICVC
tristate "LogiCVC DRM"
depends on DRM
depends on OF || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
DRM display driver for the logiCVC programmable logic block from Xylon

View File

@ -0,0 +1,9 @@
logicvc-drm-y += \
logicvc_crtc.o \
logicvc_drm.o \
logicvc_interface.o \
logicvc_layer.o \
logicvc_mode.o \
logicvc_of.o
obj-$(CONFIG_DRM_LOGICVC) += logicvc-drm.o

View File

@ -0,0 +1,280 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_layer.h"
#include "logicvc_regs.h"
#define logicvc_crtc(c) \
container_of(c, struct logicvc_crtc, drm_crtc)
static enum drm_mode_status
logicvc_crtc_mode_valid(struct drm_crtc *drm_crtc,
const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		return MODE_NO_INTERLACE;
	return MODE_OK;
}
static void logicvc_crtc_atomic_begin(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
struct drm_crtc_state *old_state =
drm_atomic_get_old_crtc_state(state, drm_crtc);
struct drm_device *drm_dev = drm_crtc->dev;
unsigned long flags;
/*
* We need to grab the pending event here if vblank was already enabled
* since we won't get a call to atomic_enable to grab it.
*/
if (drm_crtc->state->event && old_state->active) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
crtc->event = drm_crtc->state->event;
drm_crtc->state->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
static void logicvc_crtc_atomic_enable(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_crtc *crtc = logicvc_crtc(drm_crtc);
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
struct drm_crtc_state *old_state =
drm_atomic_get_old_crtc_state(state, drm_crtc);
struct drm_crtc_state *new_state =
drm_atomic_get_new_crtc_state(state, drm_crtc);
struct drm_display_mode *mode = &new_state->adjusted_mode;
struct drm_device *drm_dev = drm_crtc->dev;
unsigned int hact, hfp, hsl, hbp;
unsigned int vact, vfp, vsl, vbp;
unsigned long flags;
u32 ctrl;
/* Timings */
hact = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsl = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
vact = mode->vdisplay;
vfp = mode->vsync_start - mode->vdisplay;
vsl = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
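	/*
	 * Worked example with the CEA-861 1920x1080@60 mode (hdisplay 1920,
	 * hsync_start 2008, hsync_end 2052, htotal 2200): hfp = 88, hsl = 44,
	 * hbp = 148. Each register below is programmed with value - 1.
	 */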
regmap_write(logicvc->regmap, LOGICVC_HSYNC_FRONT_PORCH_REG, hfp - 1);
regmap_write(logicvc->regmap, LOGICVC_HSYNC_REG, hsl - 1);
regmap_write(logicvc->regmap, LOGICVC_HSYNC_BACK_PORCH_REG, hbp - 1);
regmap_write(logicvc->regmap, LOGICVC_HRES_REG, hact - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_FRONT_PORCH_REG, vfp - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_REG, vsl - 1);
regmap_write(logicvc->regmap, LOGICVC_VSYNC_BACK_PORCH_REG, vbp - 1);
regmap_write(logicvc->regmap, LOGICVC_VRES_REG, vact - 1);
/* Signals */
ctrl = LOGICVC_CTRL_HSYNC_ENABLE | LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_DE_ENABLE;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl |= LOGICVC_CTRL_HSYNC_INVERT;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ctrl |= LOGICVC_CTRL_VSYNC_INVERT;
if (logicvc->interface) {
struct drm_connector *connector =
&logicvc->interface->drm_connector;
struct drm_display_info *display_info =
&connector->display_info;
if (display_info->bus_flags & DRM_BUS_FLAG_DE_LOW)
ctrl |= LOGICVC_CTRL_DE_INVERT;
if (display_info->bus_flags &
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl |= LOGICVC_CTRL_CLOCK_INVERT;
}
regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
LOGICVC_CTRL_HSYNC_ENABLE |
LOGICVC_CTRL_HSYNC_INVERT |
LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_VSYNC_INVERT |
LOGICVC_CTRL_DE_ENABLE |
LOGICVC_CTRL_DE_INVERT |
LOGICVC_CTRL_PIXEL_INVERT |
LOGICVC_CTRL_CLOCK_INVERT, ctrl);
/* Generate internal state reset. */
regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
drm_crtc_vblank_on(drm_crtc);
/* Register our event after vblank is enabled. */
if (drm_crtc->state->event && !old_state->active) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
WARN_ON(drm_crtc_vblank_get(drm_crtc) != 0);
crtc->event = drm_crtc->state->event;
drm_crtc->state->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
static void logicvc_crtc_atomic_disable(struct drm_crtc *drm_crtc,
struct drm_atomic_state *state)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
struct drm_device *drm_dev = drm_crtc->dev;
drm_crtc_vblank_off(drm_crtc);
/* Disable and clear CRTC bits. */
regmap_update_bits(logicvc->regmap, LOGICVC_CTRL_REG,
LOGICVC_CTRL_HSYNC_ENABLE |
LOGICVC_CTRL_HSYNC_INVERT |
LOGICVC_CTRL_VSYNC_ENABLE |
LOGICVC_CTRL_VSYNC_INVERT |
LOGICVC_CTRL_DE_ENABLE |
LOGICVC_CTRL_DE_INVERT |
LOGICVC_CTRL_PIXEL_INVERT |
LOGICVC_CTRL_CLOCK_INVERT, 0);
/* Generate internal state reset. */
regmap_write(logicvc->regmap, LOGICVC_DTYPE_REG, 0);
/* Consume any leftover event since vblank is now disabled. */
if (drm_crtc->state->event && !drm_crtc->state->active) {
spin_lock_irq(&drm_dev->event_lock);
drm_crtc_send_vblank_event(drm_crtc, drm_crtc->state->event);
drm_crtc->state->event = NULL;
spin_unlock_irq(&drm_dev->event_lock);
}
}
static const struct drm_crtc_helper_funcs logicvc_crtc_helper_funcs = {
.mode_valid = logicvc_crtc_mode_valid,
.atomic_begin = logicvc_crtc_atomic_begin,
.atomic_enable = logicvc_crtc_atomic_enable,
.atomic_disable = logicvc_crtc_atomic_disable,
};
static int logicvc_crtc_enable_vblank(struct drm_crtc *drm_crtc)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
/* Clear any pending V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_STAT_REG,
LOGICVC_INT_STAT_V_SYNC, LOGICVC_INT_STAT_V_SYNC);
/* Unmask V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
LOGICVC_INT_MASK_V_SYNC, 0);
return 0;
}
static void logicvc_crtc_disable_vblank(struct drm_crtc *drm_crtc)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_crtc->dev);
/* Mask V_SYNC interrupt. */
regmap_write_bits(logicvc->regmap, LOGICVC_INT_MASK_REG,
LOGICVC_INT_MASK_V_SYNC, LOGICVC_INT_MASK_V_SYNC);
}
static const struct drm_crtc_funcs logicvc_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = logicvc_crtc_enable_vblank,
.disable_vblank = logicvc_crtc_disable_vblank,
};
void logicvc_crtc_vblank_handler(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct logicvc_crtc *crtc = logicvc->crtc;
unsigned long flags;
if (!crtc)
return;
drm_crtc_handle_vblank(&crtc->drm_crtc);
if (crtc->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
drm_crtc_send_vblank_event(&crtc->drm_crtc, crtc->event);
drm_crtc_vblank_put(&crtc->drm_crtc);
crtc->event = NULL;
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
}
int logicvc_crtc_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct logicvc_crtc *crtc;
struct logicvc_layer *layer_primary;
int ret;
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
if (!crtc)
return -ENOMEM;
layer_primary = logicvc_layer_get_primary(logicvc);
if (!layer_primary) {
drm_err(drm_dev, "Failed to get primary layer\n");
return -EINVAL;
}
ret = drm_crtc_init_with_planes(drm_dev, &crtc->drm_crtc,
&layer_primary->drm_plane, NULL,
&logicvc_crtc_funcs, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize CRTC\n");
return ret;
}
drm_crtc_helper_add(&crtc->drm_crtc, &logicvc_crtc_helper_funcs);
crtc->drm_crtc.port = of_graph_get_port_by_id(of_node, 1);
logicvc->crtc = crtc;
return 0;
}

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_CRTC_H_
#define _LOGICVC_CRTC_H_
struct drm_pending_vblank_event;
struct logicvc_drm;
struct logicvc_crtc {
struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *event;
};
void logicvc_crtc_vblank_handler(struct logicvc_drm *logicvc);
int logicvc_crtc_init(struct logicvc_drm *logicvc);
#endif

View File

@ -0,0 +1,496 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_mode.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
#include "logicvc_regs.h"
DEFINE_DRM_GEM_CMA_FOPS(logicvc_drm_fops);
static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
/* Stride is always fixed to its configuration value. */
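	/* e.g. row_stride = 1024 and bpp = 32 yield a pitch of 4096 bytes */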
args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
return drm_gem_cma_dumb_create_internal(file_priv, drm_dev, args);
}
static struct drm_driver logicvc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET |
DRIVER_ATOMIC,
.fops = &logicvc_drm_fops,
.name = "logicvc-drm",
.desc = "Xylon LogiCVC DRM driver",
.date = "20200403",
.major = 1,
.minor = 0,
DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_cma_dumb_create),
};
static struct regmap_config logicvc_drm_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.name = "logicvc-drm",
};
static irqreturn_t logicvc_drm_irq_handler(int irq, void *data)
{
struct logicvc_drm *logicvc = data;
irqreturn_t ret = IRQ_NONE;
u32 stat = 0;
/* Get pending interrupt sources. */
regmap_read(logicvc->regmap, LOGICVC_INT_STAT_REG, &stat);
/* Clear all pending interrupt sources. */
regmap_write(logicvc->regmap, LOGICVC_INT_STAT_REG, stat);
if (stat & LOGICVC_INT_STAT_V_SYNC) {
logicvc_crtc_vblank_handler(logicvc);
ret = IRQ_HANDLED;
}
return ret;
}
static int logicvc_drm_config_parse(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct logicvc_drm_config *config = &logicvc->config;
struct device_node *layers_node;
int ret;
logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING,
&config->dithering);
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
&config->background_layer);
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
&config->layers_configurable);
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE,
&config->display_interface);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
&config->display_colorspace);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
&config->display_depth);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_ROW_STRIDE,
&config->row_stride);
if (ret)
return ret;
layers_node = of_get_child_by_name(of_node, "layers");
if (!layers_node) {
drm_err(drm_dev, "Missing non-optional layers node\n");
return -EINVAL;
}
config->layers_count = of_get_child_count(layers_node);
if (!config->layers_count) {
drm_err(drm_dev,
"Missing a non-optional layers children node\n");
return -EINVAL;
}
return 0;
}
static int logicvc_clocks_prepare(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct {
struct clk **clk;
char *name;
bool optional;
} clocks_map[] = {
{
.clk = &logicvc->vclk,
.name = "vclk",
.optional = false,
},
{
.clk = &logicvc->vclk2,
.name = "vclk2",
.optional = true,
},
{
.clk = &logicvc->lvdsclk,
.name = "lvdsclk",
.optional = true,
},
{
.clk = &logicvc->lvdsclkn,
.name = "lvdsclkn",
.optional = true,
},
};
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
struct clk *clk;
clk = devm_clk_get(dev, clocks_map[i].name);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) == -ENOENT && clocks_map[i].optional)
continue;
drm_err(drm_dev, "Missing non-optional clock %s\n",
clocks_map[i].name);
ret = PTR_ERR(clk);
goto error;
}
ret = clk_prepare_enable(clk);
if (ret) {
drm_err(drm_dev,
"Failed to prepare and enable clock %s\n",
clocks_map[i].name);
goto error;
}
*clocks_map[i].clk = clk;
}
return 0;
error:
for (i = 0; i < ARRAY_SIZE(clocks_map); i++) {
if (!*clocks_map[i].clk)
continue;
clk_disable_unprepare(*clocks_map[i].clk);
*clocks_map[i].clk = NULL;
}
return ret;
}
static int logicvc_clocks_unprepare(struct logicvc_drm *logicvc)
{
struct clk **clocks[] = {
&logicvc->vclk,
&logicvc->vclk2,
&logicvc->lvdsclk,
&logicvc->lvdsclkn,
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(clocks); i++) {
if (!*clocks[i])
continue;
clk_disable_unprepare(*clocks[i]);
*clocks[i] = NULL;
}
return 0;
}
static const struct logicvc_drm_caps logicvc_drm_caps[] = {
{
.major = 3,
.layer_address = false,
},
{
.major = 4,
.layer_address = true,
},
{
.major = 5,
.layer_address = true,
},
};
static const struct logicvc_drm_caps *
logicvc_drm_caps_match(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
const struct logicvc_drm_caps *caps = NULL;
unsigned int major, minor;
char level;
unsigned int i;
u32 version;
regmap_read(logicvc->regmap, LOGICVC_IP_VERSION_REG, &version);
major = FIELD_GET(LOGICVC_IP_VERSION_MAJOR_MASK, version);
minor = FIELD_GET(LOGICVC_IP_VERSION_MINOR_MASK, version);
level = FIELD_GET(LOGICVC_IP_VERSION_LEVEL_MASK, version) + 'a';
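/*
 * A zero minor or level in a caps entry matches any value, and the loop
 * does not break on a match, so the last matching entry wins.
 */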
for (i = 0; i < ARRAY_SIZE(logicvc_drm_caps); i++) {
if (logicvc_drm_caps[i].major &&
logicvc_drm_caps[i].major != major)
continue;
if (logicvc_drm_caps[i].minor &&
logicvc_drm_caps[i].minor != minor)
continue;
if (logicvc_drm_caps[i].level &&
logicvc_drm_caps[i].level != level)
continue;
caps = &logicvc_drm_caps[i];
}
drm_info(drm_dev, "LogiCVC version %d.%02d.%c\n", major, minor, level);
return caps;
}
static int logicvc_drm_probe(struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct device_node *reserved_mem_node;
struct reserved_mem *reserved_mem = NULL;
const struct logicvc_drm_caps *caps;
struct logicvc_drm *logicvc;
struct device *dev = &pdev->dev;
struct drm_device *drm_dev;
struct regmap *regmap = NULL;
struct resource res;
void __iomem *base;
int irq;
int ret;
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "Failed to init memory region\n");
goto error_early;
}
reserved_mem_node = of_parse_phandle(of_node, "memory-region", 0);
if (reserved_mem_node) {
reserved_mem = of_reserved_mem_lookup(reserved_mem_node);
of_node_put(reserved_mem_node);
}
/* Get regmap from parent if available. */
if (of_node->parent)
regmap = syscon_node_to_regmap(of_node->parent);
/* Register our own regmap otherwise. */
if (IS_ERR_OR_NULL(regmap)) {
ret = of_address_to_resource(of_node, 0, &res);
if (ret) {
dev_err(dev, "Failed to get resource from address\n");
goto error_reserved_mem;
}
base = devm_ioremap_resource(dev, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to map I/O base\n");
ret = PTR_ERR(base);
goto error_reserved_mem;
}
logicvc_drm_regmap_config.max_register = resource_size(&res) - 4;
regmap = devm_regmap_init_mmio(dev, base,
&logicvc_drm_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to create regmap for I/O\n");
ret = PTR_ERR(regmap);
goto error_reserved_mem;
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
goto error_reserved_mem;
}
logicvc = devm_drm_dev_alloc(dev, &logicvc_drm_driver,
struct logicvc_drm, drm_dev);
if (IS_ERR(logicvc)) {
ret = PTR_ERR(logicvc);
goto error_reserved_mem;
}
platform_set_drvdata(pdev, logicvc);
drm_dev = &logicvc->drm_dev;
logicvc->regmap = regmap;
INIT_LIST_HEAD(&logicvc->layers_list);
caps = logicvc_drm_caps_match(logicvc);
if (!caps) {
ret = -EINVAL;
goto error_reserved_mem;
}
logicvc->caps = caps;
if (reserved_mem)
logicvc->reserved_mem_base = reserved_mem->base;
ret = logicvc_clocks_prepare(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to prepare clocks\n");
goto error_reserved_mem;
}
ret = devm_request_irq(dev, irq, logicvc_drm_irq_handler, 0,
dev_name(dev), logicvc);
if (ret) {
drm_err(drm_dev, "Failed to request IRQ\n");
goto error_clocks;
}
ret = logicvc_drm_config_parse(logicvc);
if (ret && ret != -ENODEV) {
drm_err(drm_dev, "Failed to parse config\n");
goto error_clocks;
}
ret = drmm_mode_config_init(drm_dev);
if (ret) {
drm_err(drm_dev, "Failed to init mode config\n");
goto error_clocks;
}
ret = logicvc_layers_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize layers\n");
goto error_clocks;
}
ret = logicvc_crtc_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize CRTC\n");
goto error_clocks;
}
logicvc_layers_attach_crtc(logicvc);
ret = logicvc_interface_init(logicvc);
if (ret) {
if (ret != -EPROBE_DEFER)
drm_err(drm_dev, "Failed to initialize interface\n");
goto error_clocks;
}
logicvc_interface_attach_crtc(logicvc);
ret = logicvc_mode_init(logicvc);
if (ret) {
drm_err(drm_dev, "Failed to initialize KMS\n");
goto error_clocks;
}
ret = drm_dev_register(drm_dev, 0);
if (ret) {
drm_err(drm_dev, "Failed to register DRM device\n");
goto error_mode;
}
drm_fbdev_generic_setup(drm_dev, drm_dev->mode_config.preferred_depth);
return 0;
error_mode:
logicvc_mode_fini(logicvc);
error_clocks:
logicvc_clocks_unprepare(logicvc);
error_reserved_mem:
of_reserved_mem_device_release(dev);
error_early:
return ret;
}
static int logicvc_drm_remove(struct platform_device *pdev)
{
struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct drm_device *drm_dev = &logicvc->drm_dev;
drm_dev_unregister(drm_dev);
drm_atomic_helper_shutdown(drm_dev);
logicvc_mode_fini(logicvc);
logicvc_clocks_unprepare(logicvc);
of_reserved_mem_device_release(dev);
return 0;
}
static const struct of_device_id logicvc_drm_of_table[] = {
{ .compatible = "xylon,logicvc-3.02.a-display" },
{ .compatible = "xylon,logicvc-4.01.a-display" },
{},
};
MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
static struct platform_driver logicvc_drm_platform_driver = {
.probe = logicvc_drm_probe,
.remove = logicvc_drm_remove,
.driver = {
.name = "logicvc-drm",
.of_match_table = logicvc_drm_of_table,
},
};
module_platform_driver(logicvc_drm_platform_driver);
MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
MODULE_DESCRIPTION("Xylon LogiCVC DRM driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_DRM_H_
#define _LOGICVC_DRM_H_
#include <linux/regmap.h>
#include <linux/types.h>
#include <drm/drm_device.h>
#define LOGICVC_DISPLAY_INTERFACE_RGB 0
#define LOGICVC_DISPLAY_INTERFACE_ITU656 1
#define LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS 2
#define LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA 3
#define LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS 4
#define LOGICVC_DISPLAY_INTERFACE_DVI 5
#define LOGICVC_DISPLAY_COLORSPACE_RGB 0
#define LOGICVC_DISPLAY_COLORSPACE_YUV422 1
#define LOGICVC_DISPLAY_COLORSPACE_YUV444 2
#define logicvc_drm(d) \
container_of(d, struct logicvc_drm, drm_dev)
struct logicvc_crtc;
struct logicvc_interface;
struct logicvc_drm_config {
u32 display_interface;
u32 display_colorspace;
u32 display_depth;
u32 row_stride;
bool dithering;
bool background_layer;
bool layers_configurable;
u32 layers_count;
};
struct logicvc_drm_caps {
unsigned int major;
unsigned int minor;
char level;
bool layer_address;
};
struct logicvc_drm {
const struct logicvc_drm_caps *caps;
struct logicvc_drm_config config;
struct drm_device drm_dev;
phys_addr_t reserved_mem_base;
struct regmap *regmap;
struct clk *vclk;
struct clk *vclk2;
struct clk *lvdsclk;
struct clk *lvdsclkn;
struct list_head layers_list;
struct logicvc_crtc *crtc;
struct logicvc_interface *interface;
};
#endif

View File

@ -0,0 +1,214 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_regs.h"
#define logicvc_interface_from_drm_encoder(c) \
container_of(c, struct logicvc_interface, drm_encoder)
#define logicvc_interface_from_drm_connector(c) \
container_of(c, struct logicvc_interface, drm_connector)
static void logicvc_encoder_enable(struct drm_encoder *drm_encoder)
{
struct logicvc_drm *logicvc = logicvc_drm(drm_encoder->dev);
struct logicvc_interface *interface =
logicvc_interface_from_drm_encoder(drm_encoder);
regmap_update_bits(logicvc->regmap, LOGICVC_POWER_CTRL_REG,
LOGICVC_POWER_CTRL_VIDEO_ENABLE,
LOGICVC_POWER_CTRL_VIDEO_ENABLE);
if (interface->drm_panel) {
drm_panel_prepare(interface->drm_panel);
drm_panel_enable(interface->drm_panel);
}
}
static void logicvc_encoder_disable(struct drm_encoder *drm_encoder)
{
struct logicvc_interface *interface =
logicvc_interface_from_drm_encoder(drm_encoder);
if (interface->drm_panel) {
drm_panel_disable(interface->drm_panel);
drm_panel_unprepare(interface->drm_panel);
}
}
static const struct drm_encoder_helper_funcs logicvc_encoder_helper_funcs = {
.enable = logicvc_encoder_enable,
.disable = logicvc_encoder_disable,
};
static const struct drm_encoder_funcs logicvc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int logicvc_connector_get_modes(struct drm_connector *drm_connector)
{
struct logicvc_interface *interface =
logicvc_interface_from_drm_connector(drm_connector);
if (interface->drm_panel)
return drm_panel_get_modes(interface->drm_panel, drm_connector);
WARN_ONCE(1, "Retrieving modes from a native connector is not implemented.");
return 0;
}
static const struct drm_connector_helper_funcs logicvc_connector_helper_funcs = {
.get_modes = logicvc_connector_get_modes,
};
static const struct drm_connector_funcs logicvc_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int logicvc_interface_encoder_type(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
return DRM_MODE_ENCODER_LVDS;
case LOGICVC_DISPLAY_INTERFACE_DVI:
return DRM_MODE_ENCODER_TMDS;
case LOGICVC_DISPLAY_INTERFACE_RGB:
return DRM_MODE_ENCODER_DPI;
default:
return DRM_MODE_ENCODER_NONE;
}
}
static int logicvc_interface_connector_type(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS:
case LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS_CAMERA:
case LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS:
return DRM_MODE_CONNECTOR_LVDS;
case LOGICVC_DISPLAY_INTERFACE_DVI:
return DRM_MODE_CONNECTOR_DVID;
case LOGICVC_DISPLAY_INTERFACE_RGB:
return DRM_MODE_CONNECTOR_DPI;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
}
static bool logicvc_interface_native_connector(struct logicvc_drm *logicvc)
{
switch (logicvc->config.display_interface) {
case LOGICVC_DISPLAY_INTERFACE_DVI:
return true;
default:
return false;
}
}
void logicvc_interface_attach_crtc(struct logicvc_drm *logicvc)
{
uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
logicvc->interface->drm_encoder.possible_crtcs = possible_crtcs;
}
int logicvc_interface_init(struct logicvc_drm *logicvc)
{
struct logicvc_interface *interface;
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
int encoder_type = logicvc_interface_encoder_type(logicvc);
int connector_type = logicvc_interface_connector_type(logicvc);
bool native_connector = logicvc_interface_native_connector(logicvc);
int ret;
interface = devm_kzalloc(dev, sizeof(*interface), GFP_KERNEL);
if (!interface) {
ret = -ENOMEM;
goto error_early;
}
ret = drm_of_find_panel_or_bridge(of_node, 0, 0, &interface->drm_panel,
&interface->drm_bridge);
if (ret == -EPROBE_DEFER)
goto error_early;
ret = drm_encoder_init(drm_dev, &interface->drm_encoder,
&logicvc_encoder_funcs, encoder_type, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize encoder\n");
goto error_early;
}
drm_encoder_helper_add(&interface->drm_encoder,
&logicvc_encoder_helper_funcs);
if (native_connector || interface->drm_panel) {
ret = drm_connector_init(drm_dev, &interface->drm_connector,
&logicvc_connector_funcs,
connector_type);
if (ret) {
drm_err(drm_dev, "Failed to initialize connector\n");
goto error_encoder;
}
drm_connector_helper_add(&interface->drm_connector,
&logicvc_connector_helper_funcs);
ret = drm_connector_attach_encoder(&interface->drm_connector,
&interface->drm_encoder);
if (ret) {
drm_err(drm_dev,
"Failed to attach connector to encoder\n");
goto error_encoder;
}
}
if (interface->drm_bridge) {
ret = drm_bridge_attach(&interface->drm_encoder,
interface->drm_bridge, NULL, 0);
if (ret) {
drm_err(drm_dev,
"Failed to attach bridge to encoder\n");
goto error_encoder;
}
}
logicvc->interface = interface;
return 0;
error_encoder:
drm_encoder_cleanup(&interface->drm_encoder);
error_early:
return ret;
}

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_INTERFACE_H_
#define _LOGICVC_INTERFACE_H_
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_panel.h>
struct logicvc_drm;
struct logicvc_interface {
struct drm_encoder drm_encoder;
struct drm_connector drm_connector;
struct drm_panel *drm_panel;
struct drm_bridge *drm_bridge;
};
void logicvc_interface_attach_crtc(struct logicvc_drm *logicvc);
int logicvc_interface_init(struct logicvc_drm *logicvc);
#endif

View File

@ -0,0 +1,631 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/of.h>
#include <linux/types.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
#include "logicvc_drm.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
#include "logicvc_regs.h"
#define logicvc_layer(p) \
container_of(p, struct logicvc_layer, drm_plane)
static uint32_t logicvc_layer_formats_rgb16[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_INVALID,
};
static uint32_t logicvc_layer_formats_rgb24[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_INVALID,
};
/*
* What we call depth in this driver only counts color components, not alpha.
* This allows us to stay compatible with the LogiCVC bitstream definitions.
*/
static uint32_t logicvc_layer_formats_rgb24_alpha[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_INVALID,
};
static struct logicvc_layer_formats logicvc_layer_formats[] = {
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 16,
.formats = logicvc_layer_formats_rgb16,
},
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 24,
.formats = logicvc_layer_formats_rgb24,
},
{
.colorspace = LOGICVC_LAYER_COLORSPACE_RGB,
.depth = 24,
.alpha = true,
.formats = logicvc_layer_formats_rgb24_alpha,
},
{ }
};
static bool logicvc_layer_format_inverted(uint32_t format)
{
switch (format) {
case DRM_FORMAT_BGR565:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return true;
default:
return false;
}
}
static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct drm_device *drm_dev = drm_plane->dev;
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_dev);
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, drm_plane);
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
bool can_position;
int ret;
if (!new_state->crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(new_state->state,
new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (new_state->crtc_x < 0 || new_state->crtc_y < 0) {
drm_err(drm_dev,
"Negative on-CRTC positions are not supported.\n");
return -EINVAL;
}
if (!logicvc->caps->layer_address) {
ret = logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
NULL);
if (ret) {
drm_err(drm_dev, "No viable setup for buffer found.\n");
return ret;
}
}
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
can_position = (drm_plane->type == DRM_PLANE_TYPE_OVERLAY &&
layer->index != (logicvc->config.layers_count - 1) &&
logicvc->config.layers_configurable);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
min_scale, max_scale,
can_position, true);
if (ret) {
drm_err(drm_dev, "Invalid plane state\n\n");
return ret;
}
return 0;
}
static void logicvc_plane_atomic_update(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, drm_plane);
struct drm_crtc *drm_crtc = &logicvc->crtc->drm_crtc;
struct drm_display_mode *mode = &drm_crtc->state->adjusted_mode;
struct drm_framebuffer *fb = new_state->fb;
struct logicvc_layer_buffer_setup setup = {};
u32 index = layer->index;
u32 reg;
/* Layer dimensions */
regmap_write(logicvc->regmap, LOGICVC_LAYER_WIDTH_REG(index),
new_state->crtc_w - 1);
regmap_write(logicvc->regmap, LOGICVC_LAYER_HEIGHT_REG(index),
new_state->crtc_h - 1);
if (logicvc->caps->layer_address) {
phys_addr_t fb_addr = drm_fb_cma_get_gem_addr(fb, new_state, 0);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ADDRESS_REG(index),
fb_addr);
} else {
/* Rely on offsets to configure the address. */
logicvc_layer_buffer_find_setup(logicvc, layer, new_state,
&setup);
/* Layer memory offsets */
regmap_write(logicvc->regmap, LOGICVC_BUFFER_SEL_REG,
LOGICVC_BUFFER_SEL_VALUE(index, setup.buffer_sel));
regmap_write(logicvc->regmap, LOGICVC_LAYER_HOFFSET_REG(index),
setup.hoffset);
regmap_write(logicvc->regmap, LOGICVC_LAYER_VOFFSET_REG(index),
setup.voffset);
}
/* Layer position */
regmap_write(logicvc->regmap, LOGICVC_LAYER_HPOSITION_REG(index),
mode->hdisplay - 1 - new_state->crtc_x);
/* Vertical position must be set last to sync layer register changes. */
regmap_write(logicvc->regmap, LOGICVC_LAYER_VPOSITION_REG(index),
mode->vdisplay - 1 - new_state->crtc_y);
/* Layer alpha */
if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER) {
u32 alpha_bits;
u32 alpha_max;
u32 alpha;
switch (layer->config.depth) {
case 8:
alpha_bits = 3;
break;
case 16:
if (layer->config.colorspace ==
LOGICVC_LAYER_COLORSPACE_YUV)
alpha_bits = 8;
else
alpha_bits = 6;
break;
default:
alpha_bits = 8;
break;
}
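/*
 * Rescale the 16-bit DRM alpha value (DRM_BLEND_ALPHA_OPAQUE at most)
 * to the alpha_bits-wide range of the hardware register.
 */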
alpha_max = BIT(alpha_bits) - 1;
alpha = new_state->alpha * alpha_max / DRM_BLEND_ALPHA_OPAQUE;
drm_dbg_kms(drm_dev, "Setting layer %d alpha to %d/%d\n", index,
alpha, alpha_max);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ALPHA_REG(index),
alpha);
}
/* Layer control */
reg = LOGICVC_LAYER_CTRL_ENABLE;
if (logicvc_layer_format_inverted(fb->format->format))
reg |= LOGICVC_LAYER_CTRL_PIXEL_FORMAT_INVERT;
reg |= LOGICVC_LAYER_CTRL_COLOR_KEY_DISABLE;
regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), reg);
}
static void logicvc_plane_atomic_disable(struct drm_plane *drm_plane,
struct drm_atomic_state *state)
{
struct logicvc_layer *layer = logicvc_layer(drm_plane);
struct logicvc_drm *logicvc = logicvc_drm(drm_plane->dev);
u32 index = layer->index;
regmap_write(logicvc->regmap, LOGICVC_LAYER_CTRL_REG(index), 0);
}
static struct drm_plane_helper_funcs logicvc_plane_helper_funcs = {
.atomic_check = logicvc_plane_atomic_check,
.atomic_update = logicvc_plane_atomic_update,
.atomic_disable = logicvc_plane_atomic_disable,
};
static const struct drm_plane_funcs logicvc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
struct logicvc_layer *layer,
struct drm_plane_state *state,
struct logicvc_layer_buffer_setup *setup)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_framebuffer *fb = state->fb;
/* All the supported formats have a single data plane. */
u32 layer_bytespp = fb->format->cpp[0];
u32 layer_stride = layer_bytespp * logicvc->config.row_stride;
u32 base_offset = layer->config.base_offset * layer_stride;
u32 buffer_offset = layer->config.buffer_offset * layer_stride;
u8 buffer_sel = 0;
u16 voffset = 0;
u16 hoffset = 0;
phys_addr_t fb_addr;
u32 fb_offset;
u32 gap;
if (!logicvc->reserved_mem_base) {
drm_err(drm_dev, "No reserved memory base was registered!\n");
return -ENOMEM;
}
fb_addr = drm_fb_cma_get_gem_addr(fb, state, 0);
if (fb_addr < logicvc->reserved_mem_base) {
drm_err(drm_dev,
"Framebuffer memory below reserved memory base!\n");
return -EINVAL;
}
fb_offset = (u32) (fb_addr - logicvc->reserved_mem_base);
if (fb_offset < base_offset) {
drm_err(drm_dev,
"Framebuffer offset below layer base offset!\n");
return -EINVAL;
}
gap = fb_offset - base_offset;
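/*
 * Decompose the gap between the framebuffer and the layer base offset
 * into hardware offsets, from coarsest to finest granularity: buffer
 * selections (chunks of buffer_offset), then vertical line offsets
 * (chunks of layer_stride), then horizontal pixel offsets (chunks of
 * layer_bytespp). Any remaining gap means the buffer cannot be reached.
 */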
/* Use the video buffer selection. */
if (gap && buffer_offset) {
buffer_sel = gap / buffer_offset;
if (buffer_sel > LOGICVC_BUFFER_SEL_MAX)
buffer_sel = LOGICVC_BUFFER_SEL_MAX;
gap -= buffer_sel * buffer_offset;
}
/* Use the vertical offset. */
if (gap && layer_stride && logicvc->config.layers_configurable) {
voffset = gap / layer_stride;
if (voffset > LOGICVC_LAYER_VOFFSET_MAX)
voffset = LOGICVC_LAYER_VOFFSET_MAX;
gap -= voffset * layer_stride;
}
/* Use the horizontal offset. */
if (gap && layer_bytespp && logicvc->config.layers_configurable) {
hoffset = gap / layer_bytespp;
if (hoffset > LOGICVC_DIMENSIONS_MAX)
hoffset = LOGICVC_DIMENSIONS_MAX;
gap -= hoffset * layer_bytespp;
}
if (gap) {
drm_err(drm_dev,
"Unable to find layer %d buffer setup for 0x%x byte gap\n",
layer->index, fb_offset - base_offset);
return -EINVAL;
}
drm_dbg_kms(drm_dev, "Found layer %d buffer setup for 0x%x byte gap:\n",
layer->index, fb_offset - base_offset);
drm_dbg_kms(drm_dev, "- buffer_sel = 0x%x chunks of 0x%x bytes\n",
buffer_sel, buffer_offset);
drm_dbg_kms(drm_dev, "- voffset = 0x%x chunks of 0x%x bytes\n", voffset,
layer_stride);
drm_dbg_kms(drm_dev, "- hoffset = 0x%x chunks of 0x%x bytes\n", hoffset,
layer_bytespp);
if (setup) {
setup->buffer_sel = buffer_sel;
setup->voffset = voffset;
setup->hoffset = hoffset;
}
return 0;
}
static struct logicvc_layer_formats *logicvc_layer_formats_lookup(struct logicvc_layer *layer)
{
bool alpha;
unsigned int i = 0;
alpha = (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_PIXEL);
while (logicvc_layer_formats[i].formats) {
if (logicvc_layer_formats[i].colorspace == layer->config.colorspace &&
logicvc_layer_formats[i].depth == layer->config.depth &&
logicvc_layer_formats[i].alpha == alpha)
return &logicvc_layer_formats[i];
i++;
}
return NULL;
}
static unsigned int logicvc_layer_formats_count(struct logicvc_layer_formats *formats)
{
unsigned int count = 0;
while (formats->formats[count] != DRM_FORMAT_INVALID)
count++;
return count;
}
static int logicvc_layer_config_parse(struct logicvc_drm *logicvc,
struct logicvc_layer *layer)
{
struct device_node *of_node = layer->of_node;
struct logicvc_layer_config *config = &layer->config;
int ret;
logicvc_of_property_parse_bool(of_node,
LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
&config->primary);
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
&config->colorspace);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_DEPTH,
&config->depth);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
&config->alpha_mode);
if (ret)
return ret;
/*
* Memory offset is only relevant without layer address configuration.
*/
if (logicvc->caps->layer_address)
return 0;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
&config->base_offset);
if (ret)
return ret;
ret = logicvc_of_property_parse_u32(of_node,
LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
&config->buffer_offset);
if (ret)
return ret;
return 0;
}
struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc,
u32 index)
{
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list)
if (layer->index == index)
return layer;
return NULL;
}
struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc,
enum drm_plane_type type)
{
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list)
if (layer->drm_plane.type == type)
return layer;
return NULL;
}
struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc)
{
return logicvc_layer_get_from_type(logicvc, DRM_PLANE_TYPE_PRIMARY);
}
static int logicvc_layer_init(struct logicvc_drm *logicvc,
struct device_node *of_node, u32 index)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct logicvc_layer *layer = NULL;
struct logicvc_layer_formats *formats;
unsigned int formats_count;
enum drm_plane_type type;
unsigned int zpos;
int ret;
layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
if (!layer) {
ret = -ENOMEM;
goto error;
}
layer->of_node = of_node;
layer->index = index;
ret = logicvc_layer_config_parse(logicvc, layer);
if (ret) {
drm_err(drm_dev, "Failed to parse config for layer #%d\n",
index);
goto error;
}
formats = logicvc_layer_formats_lookup(layer);
if (!formats) {
drm_err(drm_dev, "Failed to lookup formats for layer #%d\n",
index);
ret = -EINVAL;
goto error;
}
formats_count = logicvc_layer_formats_count(formats);
/* The final layer can be configured as a background layer. */
if (logicvc->config.background_layer &&
index == (logicvc->config.layers_count - 1)) {
/*
* A zero value for black is only valid for RGB, not for YUV,
* so this will need to take the format into account for YUV.
*/
u32 background = 0;
drm_dbg_kms(drm_dev, "Using layer #%d as background layer\n",
index);
regmap_write(logicvc->regmap, LOGICVC_BACKGROUND_COLOR_REG,
background);
devm_kfree(dev, layer);
return 0;
}
if (layer->config.primary)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(drm_dev, &layer->drm_plane, 0,
&logicvc_plane_funcs, formats->formats,
formats_count, NULL, type, NULL);
if (ret) {
drm_err(drm_dev, "Failed to initialize layer plane\n");
return ret;
}
drm_plane_helper_add(&layer->drm_plane, &logicvc_plane_helper_funcs);
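/*
 * The zpos is the reversed layer index, so layer #0 ends up with the
 * highest zpos and sits on top of the stack.
 */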
zpos = logicvc->config.layers_count - index - 1;
drm_dbg_kms(drm_dev, "Giving layer #%d zpos %d\n", index, zpos);
if (layer->config.alpha_mode == LOGICVC_LAYER_ALPHA_LAYER)
drm_plane_create_alpha_property(&layer->drm_plane);
drm_plane_create_zpos_immutable_property(&layer->drm_plane, zpos);
drm_dbg_kms(drm_dev, "Registering layer #%d\n", index);
layer->formats = formats;
list_add_tail(&layer->list, &logicvc->layers_list);
return 0;
error:
if (layer)
devm_kfree(dev, layer);
return ret;
}
static void logicvc_layer_fini(struct logicvc_drm *logicvc,
struct logicvc_layer *layer)
{
struct device *dev = logicvc->drm_dev.dev;
list_del(&layer->list);
devm_kfree(dev, layer);
}
void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc)
{
uint32_t possible_crtcs = drm_crtc_mask(&logicvc->crtc->drm_crtc);
struct logicvc_layer *layer;
list_for_each_entry(layer, &logicvc->layers_list, list) {
if (layer->drm_plane.type != DRM_PLANE_TYPE_OVERLAY)
continue;
layer->drm_plane.possible_crtcs = possible_crtcs;
}
}
int logicvc_layers_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct device *dev = drm_dev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *layer_node = NULL;
struct device_node *layers_node;
struct logicvc_layer *layer;
struct logicvc_layer *next;
int ret = 0;
layers_node = of_get_child_by_name(of_node, "layers");
if (!layers_node) {
drm_err(drm_dev, "No layers node found in the description\n");
ret = -ENODEV;
goto error;
}
for_each_child_of_node(layers_node, layer_node) {
u32 index = 0;
if (!logicvc_of_node_is_layer(layer_node))
continue;
ret = of_property_read_u32(layer_node, "reg", &index);
if (ret)
continue;
layer = logicvc_layer_get_from_index(logicvc, index);
if (layer) {
drm_err(drm_dev, "Duplicated entry for layer #%d\n",
index);
continue;
}
ret = logicvc_layer_init(logicvc, layer_node, index);
if (ret) {
of_node_put(layers_node);
goto error;
}
}
of_node_put(layers_node);
return 0;
error:
list_for_each_entry_safe(layer, next, &logicvc->layers_list, list)
logicvc_layer_fini(logicvc, layer);
return ret;
}

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_LAYER_H_
#define _LOGICVC_LAYER_H_
#include <linux/of.h>
#include <linux/types.h>
#include <drm/drm_plane.h>
#define LOGICVC_LAYER_COLORSPACE_RGB 0
#define LOGICVC_LAYER_COLORSPACE_YUV 1
#define LOGICVC_LAYER_ALPHA_LAYER 0
#define LOGICVC_LAYER_ALPHA_PIXEL 1
struct logicvc_layer_buffer_setup {
u8 buffer_sel;
u16 voffset;
u16 hoffset;
};
struct logicvc_layer_config {
u32 colorspace;
u32 depth;
u32 alpha_mode;
u32 base_offset;
u32 buffer_offset;
bool primary;
};
struct logicvc_layer_formats {
u32 colorspace;
u32 depth;
bool alpha;
uint32_t *formats;
};
struct logicvc_layer {
struct logicvc_layer_config config;
struct logicvc_layer_formats *formats;
struct device_node *of_node;
struct drm_plane drm_plane;
struct list_head list;
u32 index;
};
int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
struct logicvc_layer *layer,
struct drm_plane_state *state,
struct logicvc_layer_buffer_setup *setup);
struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc,
u32 index);
struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc,
enum drm_plane_type type);
struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc);
void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc);
int logicvc_layers_init(struct logicvc_drm *logicvc);
#endif

View File

@ -0,0 +1,80 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/types.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "logicvc_drm.h"
#include "logicvc_interface.h"
#include "logicvc_layer.h"
#include "logicvc_mode.h"
static const struct drm_mode_config_funcs logicvc_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int logicvc_mode_init(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
struct drm_mode_config *mode_config = &drm_dev->mode_config;
struct logicvc_layer *layer_primary;
uint32_t preferred_depth;
int ret;
ret = drm_vblank_init(drm_dev, mode_config->num_crtc);
if (ret) {
drm_err(drm_dev, "Failed to initialize vblank\n");
return ret;
}
layer_primary = logicvc_layer_get_primary(logicvc);
if (!layer_primary) {
drm_err(drm_dev, "Failed to get primary layer\n");
return -EINVAL;
}
preferred_depth = layer_primary->formats->depth;
/* DRM counts alpha in depth, our driver doesn't. */
if (layer_primary->formats->alpha)
preferred_depth += 8;
mode_config->min_width = 64;
mode_config->max_width = 2048;
mode_config->min_height = 1;
mode_config->max_height = 2048;
mode_config->preferred_depth = preferred_depth;
mode_config->funcs = &logicvc_mode_config_funcs;
drm_mode_config_reset(drm_dev);
drm_kms_helper_poll_init(drm_dev);
return 0;
}
void logicvc_mode_fini(struct logicvc_drm *logicvc)
{
struct drm_device *drm_dev = &logicvc->drm_dev;
drm_kms_helper_poll_fini(drm_dev);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_MODE_H_
#define _LOGICVC_MODE_H_
struct logicvc_drm;
int logicvc_mode_init(struct logicvc_drm *logicvc);
void logicvc_mode_fini(struct logicvc_drm *logicvc);
#endif

View File

@ -0,0 +1,185 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <drm/drm_print.h>
#include "logicvc_drm.h"
#include "logicvc_layer.h"
#include "logicvc_of.h"
static struct logicvc_of_property_sv logicvc_of_display_interface_sv[] = {
{ "lvds-4bits", LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS },
{ "lvds-3bits", LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS },
{ },
};
static struct logicvc_of_property_sv logicvc_of_display_colorspace_sv[] = {
{ "rgb", LOGICVC_DISPLAY_COLORSPACE_RGB },
{ "yuv422", LOGICVC_DISPLAY_COLORSPACE_YUV422 },
{ "yuv444", LOGICVC_DISPLAY_COLORSPACE_YUV444 },
{ },
};
static struct logicvc_of_property_sv logicvc_of_layer_colorspace_sv[] = {
{ "rgb", LOGICVC_LAYER_COLORSPACE_RGB },
{ "yuv", LOGICVC_LAYER_COLORSPACE_YUV },
{ },
};
static struct logicvc_of_property_sv logicvc_of_layer_alpha_mode_sv[] = {
{ "layer", LOGICVC_LAYER_ALPHA_LAYER },
{ "pixel", LOGICVC_LAYER_ALPHA_PIXEL },
{ },
};
static struct logicvc_of_property logicvc_of_properties[] = {
[LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE] = {
.name = "xylon,display-interface",
.sv = logicvc_of_display_interface_sv,
.range = {
LOGICVC_DISPLAY_INTERFACE_LVDS_4BITS,
LOGICVC_DISPLAY_INTERFACE_LVDS_3BITS,
},
},
[LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE] = {
.name = "xylon,display-colorspace",
.sv = logicvc_of_display_colorspace_sv,
.range = {
LOGICVC_DISPLAY_COLORSPACE_RGB,
LOGICVC_DISPLAY_COLORSPACE_YUV444,
},
},
[LOGICVC_OF_PROPERTY_DISPLAY_DEPTH] = {
.name = "xylon,display-depth",
.range = { 8, 24 },
},
[LOGICVC_OF_PROPERTY_ROW_STRIDE] = {
.name = "xylon,row-stride",
},
[LOGICVC_OF_PROPERTY_DITHERING] = {
.name = "xylon,dithering",
.optional = true,
},
[LOGICVC_OF_PROPERTY_BACKGROUND_LAYER] = {
.name = "xylon,background-layer",
.optional = true,
},
[LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE] = {
.name = "xylon,layers-configurable",
.optional = true,
},
[LOGICVC_OF_PROPERTY_LAYERS_COUNT] = {
.name = "xylon,layers-count",
},
[LOGICVC_OF_PROPERTY_LAYER_DEPTH] = {
.name = "xylon,layer-depth",
.range = { 8, 24 },
},
[LOGICVC_OF_PROPERTY_LAYER_COLORSPACE] = {
.name = "xylon,layer-colorspace",
.sv = logicvc_of_layer_colorspace_sv,
.range = {
LOGICVC_LAYER_COLORSPACE_RGB,
LOGICVC_LAYER_COLORSPACE_RGB,
},
},
[LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE] = {
.name = "xylon,layer-alpha-mode",
.sv = logicvc_of_layer_alpha_mode_sv,
.range = {
LOGICVC_LAYER_ALPHA_LAYER,
LOGICVC_LAYER_ALPHA_PIXEL,
},
},
[LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET] = {
.name = "xylon,layer-base-offset",
},
[LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET] = {
.name = "xylon,layer-buffer-offset",
},
[LOGICVC_OF_PROPERTY_LAYER_PRIMARY] = {
.name = "xylon,layer-primary",
.optional = true,
},
};
static int logicvc_of_property_sv_value(struct logicvc_of_property_sv *sv,
const char *string, u32 *value)
{
unsigned int i = 0;
while (sv[i].string) {
if (!strcmp(sv[i].string, string)) {
*value = sv[i].value;
return 0;
}
i++;
}
return -EINVAL;
}
int logicvc_of_property_parse_u32(struct device_node *of_node,
unsigned int index, u32 *target)
{
struct logicvc_of_property *property;
const char *string;
u32 value;
int ret;
if (index >= LOGICVC_OF_PROPERTY_MAXIMUM)
return -EINVAL;
property = &logicvc_of_properties[index];
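/*
 * A mandatory property that is absent is reported as -ENODEV, which
 * lets callers tell a missing property apart from a malformed value.
 */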
if (!property->optional &&
!of_property_read_bool(of_node, property->name))
return -ENODEV;
if (property->sv) {
ret = of_property_read_string(of_node, property->name, &string);
if (ret)
return ret;
ret = logicvc_of_property_sv_value(property->sv, string,
&value);
if (ret)
return ret;
} else {
ret = of_property_read_u32(of_node, property->name, &value);
if (ret)
return ret;
}
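/* An all-zero range means no range checking for this property. */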
if (property->range[0] || property->range[1])
if (value < property->range[0] || value > property->range[1])
return -ERANGE;
*target = value;
return 0;
}
void logicvc_of_property_parse_bool(struct device_node *of_node,
unsigned int index, bool *target)
{
struct logicvc_of_property *property;
if (index >= LOGICVC_OF_PROPERTY_MAXIMUM) {
/* Fallback. */
*target = false;
return;
}
property = &logicvc_of_properties[index];
*target = of_property_read_bool(of_node, property->name);
}
bool logicvc_of_node_is_layer(struct device_node *of_node)
{
return !of_node_cmp(of_node->name, "layer");
}

View File

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _LOGICVC_OF_H_
#define _LOGICVC_OF_H_
enum logicvc_of_property_index {
LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE = 0,
LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
LOGICVC_OF_PROPERTY_ROW_STRIDE,
LOGICVC_OF_PROPERTY_DITHERING,
LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
LOGICVC_OF_PROPERTY_LAYERS_COUNT,
LOGICVC_OF_PROPERTY_LAYER_DEPTH,
LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
LOGICVC_OF_PROPERTY_MAXIMUM,
};
struct logicvc_of_property_sv {
const char *string;
u32 value;
};
struct logicvc_of_property {
char *name;
bool optional;
struct logicvc_of_property_sv *sv;
u32 range[2];
};
int logicvc_of_property_parse_u32(struct device_node *of_node,
unsigned int index, u32 *target);
void logicvc_of_property_parse_bool(struct device_node *of_node,
unsigned int index, bool *target);
bool logicvc_of_node_is_layer(struct device_node *of_node);
#endif

View File

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*
* Copyright (C) 2014 Xylon d.o.o.
* Author: Davor Joja <davor.joja@logicbricks.com>
*/
#ifndef _LOGICVC_REGS_H_
#define _LOGICVC_REGS_H_
#define LOGICVC_DIMENSIONS_MAX (BIT(16) - 1)
#define LOGICVC_HSYNC_FRONT_PORCH_REG 0x00
#define LOGICVC_HSYNC_REG 0x08
#define LOGICVC_HSYNC_BACK_PORCH_REG 0x10
#define LOGICVC_HRES_REG 0x18
#define LOGICVC_VSYNC_FRONT_PORCH_REG 0x20
#define LOGICVC_VSYNC_REG 0x28
#define LOGICVC_VSYNC_BACK_PORCH_REG 0x30
#define LOGICVC_VRES_REG 0x38
#define LOGICVC_CTRL_REG 0x40
#define LOGICVC_CTRL_CLOCK_INVERT BIT(8)
#define LOGICVC_CTRL_PIXEL_INVERT BIT(7)
#define LOGICVC_CTRL_DE_INVERT BIT(5)
#define LOGICVC_CTRL_DE_ENABLE BIT(4)
#define LOGICVC_CTRL_VSYNC_INVERT BIT(3)
#define LOGICVC_CTRL_VSYNC_ENABLE BIT(2)
#define LOGICVC_CTRL_HSYNC_INVERT BIT(1)
#define LOGICVC_CTRL_HSYNC_ENABLE BIT(0)
#define LOGICVC_DTYPE_REG 0x48
#define LOGICVC_BACKGROUND_COLOR_REG 0x50
#define LOGICVC_BUFFER_SEL_REG 0x58
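/*
 * The buffer selection register packs a per-layer enable bit at
 * position (10 + i) with a two-bit buffer index at position (2 * i).
 */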
#define LOGICVC_BUFFER_SEL_VALUE(i, v) \
(BIT(10 + (i)) | ((v) << (2 * (i))))
#define LOGICVC_BUFFER_SEL_MAX 2
#define LOGICVC_DOUBLE_CLUT_REG 0x60
#define LOGICVC_INT_STAT_REG 0x68
#define LOGICVC_INT_STAT_V_SYNC BIT(5)
#define LOGICVC_INT_MASK_REG 0x70
#define LOGICVC_INT_MASK_V_SYNC BIT(5)
#define LOGICVC_POWER_CTRL_REG 0x78
#define LOGICVC_POWER_CTRL_BACKLIGHT_ENABLE BIT(0)
#define LOGICVC_POWER_CTRL_VDD_ENABLE BIT(1)
#define LOGICVC_POWER_CTRL_VEE_ENABLE BIT(2)
#define LOGICVC_POWER_CTRL_VIDEO_ENABLE BIT(3)
#define LOGICVC_IP_VERSION_REG 0xf8
#define LOGICVC_IP_VERSION_MAJOR_MASK GENMASK(16, 11)
#define LOGICVC_IP_VERSION_MINOR_MASK GENMASK(10, 5)
#define LOGICVC_IP_VERSION_LEVEL_MASK GENMASK(4, 0)
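/*
 * The layer address register shares its offset with the horizontal
 * offset register: version 4 and later address layers directly, while
 * version 3 relies on horizontal/vertical offsets instead.
 */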
#define LOGICVC_LAYER_ADDRESS_REG(i) (0x100 + (i) * 0x80)
#define LOGICVC_LAYER_HOFFSET_REG(i) (0x100 + (i) * 0x80)
#define LOGICVC_LAYER_VOFFSET_REG(i) (0x108 + (i) * 0x80)
#define LOGICVC_LAYER_VOFFSET_MAX 4095
#define LOGICVC_LAYER_HPOSITION_REG(i) (0x110 + (i) * 0x80)
#define LOGICVC_LAYER_VPOSITION_REG(i) (0x118 + (i) * 0x80)
#define LOGICVC_LAYER_WIDTH_REG(i) (0x120 + (i) * 0x80)
#define LOGICVC_LAYER_HEIGHT_REG(i) (0x128 + (i) * 0x80)
#define LOGICVC_LAYER_ALPHA_REG(i) (0x130 + (i) * 0x80)
#define LOGICVC_LAYER_CTRL_REG(i) (0x138 + (i) * 0x80)
#define LOGICVC_LAYER_CTRL_ENABLE BIT(0)
#define LOGICVC_LAYER_CTRL_COLOR_KEY_DISABLE BIT(1)
#define LOGICVC_LAYER_CTRL_PIXEL_FORMAT_INVERT BIT(4)
#define LOGICVC_LAYER_COLOR_KEY_REG(i) (0x140 + (i) * 0x80)
#endif

View File

@ -0,0 +1,320 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_disp_drv.h"
#define DISP_REG_MERGE_CTRL 0x000
#define MERGE_EN 1
#define DISP_REG_MERGE_CFG_0 0x010
#define DISP_REG_MERGE_CFG_1 0x014
#define DISP_REG_MERGE_CFG_4 0x020
#define DISP_REG_MERGE_CFG_10 0x038
/* no swap */
#define SWAP_MODE 0
#define FLD_SWAP_MODE GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_12 0x040
#define CFG_10_10_1PI_2PO_BUF_MODE 6
#define CFG_10_10_2PI_2PO_BUF_MODE 8
#define CFG_11_10_1PI_2PO_MERGE 18
#define FLD_CFG_MERGE_MODE GENMASK(4, 0)
#define DISP_REG_MERGE_CFG_24 0x070
#define DISP_REG_MERGE_CFG_25 0x074
#define DISP_REG_MERGE_CFG_26 0x078
#define DISP_REG_MERGE_CFG_27 0x07c
#define DISP_REG_MERGE_CFG_36 0x0a0
#define ULTRA_EN BIT(0)
#define PREULTRA_EN BIT(4)
#define DISP_REG_MERGE_CFG_37 0x0a4
/* 0: Off, 1: SRAM0, 2: SRAM1, 3: SRAM0 + SRAM1 */
#define BUFFER_MODE 3
#define FLD_BUFFER_MODE GENMASK(1, 0)
/*
* For the ultra and preultra settings, 6 us to 9 us is an empirical value,
* and the maximum mmsys clock frequency is 594 MHz.
*/
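/* Thresholds are expressed in pixels, e.g. 6 us * 594 pixels/us = 3564. */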
#define DISP_REG_MERGE_CFG_40 0x0b0
/* 6 us, 594M pixel/sec */
#define ULTRA_TH_LOW (6 * 594)
/* 8 us, 594M pixel/sec */
#define ULTRA_TH_HIGH (8 * 594)
#define FLD_ULTRA_TH_LOW GENMASK(15, 0)
#define FLD_ULTRA_TH_HIGH GENMASK(31, 16)
#define DISP_REG_MERGE_CFG_41 0x0b4
/* 8 us, 594M pixel/sec */
#define PREULTRA_TH_LOW (8 * 594)
/* 9 us, 594M pixel/sec */
#define PREULTRA_TH_HIGH (9 * 594)
#define FLD_PREULTRA_TH_LOW GENMASK(15, 0)
#define FLD_PREULTRA_TH_HIGH GENMASK(31, 16)
#define DISP_REG_MERGE_MUTE_0 0xf00
struct mtk_disp_merge {
void __iomem *regs;
struct clk *clk;
struct clk *async_clk;
struct cmdq_client_reg cmdq_reg;
bool fifo_en;
bool mute_support;
struct reset_control *reset_ctl;
};
void mtk_merge_start(struct device *dev)
{
mtk_merge_start_cmdq(dev, NULL);
}
void mtk_merge_stop(struct device *dev)
{
mtk_merge_stop_cmdq(dev, NULL);
}
void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
if (priv->mute_support)
mtk_ddp_write(cmdq_pkt, 0x0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_MUTE_0);
mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);
}
void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
if (priv->mute_support)
mtk_ddp_write(cmdq_pkt, 0x1, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_MUTE_0);
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CTRL);
if (priv->async_clk)
reset_control_reset(priv->reset_ctl);
}
static void mtk_merge_fifo_setting(struct mtk_disp_merge *priv,
struct cmdq_pkt *cmdq_pkt)
{
mtk_ddp_write(cmdq_pkt, ULTRA_EN | PREULTRA_EN,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_36);
mtk_ddp_write_mask(cmdq_pkt, BUFFER_MODE,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_37,
FLD_BUFFER_MODE);
mtk_ddp_write_mask(cmdq_pkt, ULTRA_TH_LOW | ULTRA_TH_HIGH << 16,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_40,
FLD_ULTRA_TH_LOW | FLD_ULTRA_TH_HIGH);
mtk_ddp_write_mask(cmdq_pkt, PREULTRA_TH_LOW | PREULTRA_TH_HIGH << 16,
&priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_41,
FLD_PREULTRA_TH_LOW | FLD_PREULTRA_TH_HIGH);
}
void mtk_merge_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
mtk_merge_advance_config(dev, w, 0, h, vrefresh, bpc, cmdq_pkt);
}
void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w,
unsigned int h, unsigned int vrefresh, unsigned int bpc,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
unsigned int mode = CFG_10_10_1PI_2PO_BUF_MODE;
if (!h || !l_w) {
dev_err(dev, "%s: input width(%d) or height(%d) is invalid\n", __func__, l_w, h);
return;
}
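/*
 * Select the working mode: buffer mode with one or two pixel inputs
 * depending on FIFO support, or LR merge mode when a right half is
 * present.
 */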
if (priv->fifo_en) {
mtk_merge_fifo_setting(priv, cmdq_pkt);
mode = CFG_10_10_2PI_2PO_BUF_MODE;
}
if (r_w)
mode = CFG_11_10_1PI_2PO_MERGE;
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_0);
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_1);
mtk_ddp_write(cmdq_pkt, h << 16 | (l_w + r_w), &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_4);
/*
* DISP_REG_MERGE_CFG_24 is the merge SRAM0 w/h;
* DISP_REG_MERGE_CFG_25 is the merge SRAM1 w/h.
* If r_w > 0, the merge is in merge mode (input0 and input1 are merged
* together): input0 goes to SRAM0 and input1 goes to SRAM1.
* If r_w = 0, the merge is in buffer mode: the input goes through SRAM0
* and then SRAM1, and both are set to the same size.
*/
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_24);
if (r_w)
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_25);
else
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_25);
/*
* DISP_REG_MERGE_CFG_26 and DISP_REG_MERGE_CFG_27 are only used in LR
* merge and only take effect when the merge is set to merge mode.
*/
mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_26);
mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_27);
mtk_ddp_write_mask(cmdq_pkt, SWAP_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_10, FLD_SWAP_MODE);
mtk_ddp_write_mask(cmdq_pkt, mode, &priv->cmdq_reg, priv->regs,
DISP_REG_MERGE_CFG_12, FLD_CFG_MERGE_MODE);
}
int mtk_merge_clk_enable(struct device *dev)
{
int ret = 0;
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "merge clk prepare enable failed\n");
return ret;
}
ret = clk_prepare_enable(priv->async_clk);
if (ret) {
/* should clean up the state of priv->clk */
clk_disable_unprepare(priv->clk);
dev_err(dev, "async clk prepare enable failed\n");
return ret;
}
return ret;
}
void mtk_merge_clk_disable(struct device *dev)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->async_clk);
clk_disable_unprepare(priv->clk);
}
static int mtk_disp_merge_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_disp_merge_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_disp_merge_component_ops = {
.bind = mtk_disp_merge_bind,
.unbind = mtk_disp_merge_unbind,
};
static int mtk_disp_merge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mtk_disp_merge *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap merge\n");
return PTR_ERR(priv->regs);
}
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get merge clk\n");
return PTR_ERR(priv->clk);
}
priv->async_clk = devm_clk_get_optional(dev, "merge_async");
if (IS_ERR(priv->async_clk)) {
dev_err(dev, "failed to get merge async clock\n");
return PTR_ERR(priv->async_clk);
}
if (priv->async_clk) {
priv->reset_ctl = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(priv->reset_ctl))
return PTR_ERR(priv->reset_ctl);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
priv->fifo_en = of_property_read_bool(dev->of_node,
"mediatek,merge-fifo-en");
priv->mute_support = of_property_read_bool(dev->of_node,
"mediatek,merge-mute");
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_merge_component_ops);
if (ret != 0)
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
static int mtk_disp_merge_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_merge_component_ops);
return 0;
}
static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
{ .compatible = "mediatek,mt8195-disp-merge", },
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
struct platform_driver mtk_disp_merge_driver = {
.probe = mtk_disp_merge_probe,
.remove = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
.owner = THIS_MODULE,
.of_match_table = mtk_disp_merge_driver_dt_match,
},
};

View File

@ -0,0 +1,315 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_mdp_rdma.h"
#define MDP_RDMA_EN 0x000
#define FLD_ROT_ENABLE BIT(0)
#define MDP_RDMA_RESET 0x008
#define MDP_RDMA_CON 0x020
#define FLD_OUTPUT_10B BIT(5)
#define FLD_SIMPLE_MODE BIT(4)
#define MDP_RDMA_GMCIF_CON 0x028
#define FLD_COMMAND_DIV BIT(0)
#define FLD_EXT_PREULTRA_EN BIT(3)
#define FLD_RD_REQ_TYPE GENMASK(7, 4)
#define VAL_RD_REQ_TYPE_BURST_8_ACCESS 7
#define FLD_ULTRA_EN GENMASK(13, 12)
#define VAL_ULTRA_EN_ENABLE 1
#define FLD_PRE_ULTRA_EN GENMASK(17, 16)
#define VAL_PRE_ULTRA_EN_ENABLE 1
#define FLD_EXT_ULTRA_EN BIT(18)
#define MDP_RDMA_SRC_CON 0x030
#define FLD_OUTPUT_ARGB BIT(25)
#define FLD_BIT_NUMBER GENMASK(19, 18)
#define FLD_SWAP BIT(14)
#define FLD_UNIFORM_CONFIG BIT(17)
#define RDMA_INPUT_10BIT BIT(18)
#define FLD_SRC_FORMAT GENMASK(3, 0)
#define MDP_RDMA_COMP_CON 0x038
#define FLD_AFBC_EN BIT(22)
#define FLD_AFBC_YUV_TRANSFORM BIT(21)
#define FLD_UFBDC_EN BIT(12)
#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060
#define FLD_MF_BKGD_WB GENMASK(22, 0)
#define MDP_RDMA_MF_SRC_SIZE 0x070
#define FLD_MF_SRC_H GENMASK(30, 16)
#define FLD_MF_SRC_W GENMASK(14, 0)
#define MDP_RDMA_MF_CLIP_SIZE 0x078
#define FLD_MF_CLIP_H GENMASK(30, 16)
#define FLD_MF_CLIP_W GENMASK(14, 0)
#define MDP_RDMA_SRC_OFFSET_0 0x118
#define FLD_SRC_OFFSET_0 GENMASK(31, 0)
#define MDP_RDMA_TRANSFORM_0 0x200
#define FLD_INT_MATRIX_SEL GENMASK(27, 23)
#define FLD_TRANS_EN BIT(16)
#define MDP_RDMA_SRC_BASE_0 0xf00
#define FLD_SRC_BASE_0 GENMASK(31, 0)
#define RDMA_CSC_FULL709_TO_RGB 5
#define RDMA_CSC_BT601_TO_RGB 6
enum rdma_format {
RDMA_INPUT_FORMAT_RGB565 = 0,
RDMA_INPUT_FORMAT_RGB888 = 1,
RDMA_INPUT_FORMAT_RGBA8888 = 2,
RDMA_INPUT_FORMAT_ARGB8888 = 3,
RDMA_INPUT_FORMAT_UYVY = 4,
RDMA_INPUT_FORMAT_YUY2 = 5,
RDMA_INPUT_FORMAT_Y8 = 7,
RDMA_INPUT_FORMAT_YV12 = 8,
RDMA_INPUT_FORMAT_UYVY_3PL = 9,
RDMA_INPUT_FORMAT_NV12 = 12,
RDMA_INPUT_FORMAT_UYVY_2PL = 13,
RDMA_INPUT_FORMAT_Y410 = 14
};
struct mtk_mdp_rdma {
void __iomem *regs;
struct clk *clk;
struct cmdq_client_reg cmdq_reg;
};
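/*
 * Map a DRM fourcc to the hardware source format index, OR-ing in the
 * FLD_SWAP and RDMA_INPUT_10BIT bits where the byte order or bit depth
 * requires them. Unknown formats fall back to RGB565.
 */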
static unsigned int rdma_fmt_convert(unsigned int fmt)
{
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
return RDMA_INPUT_FORMAT_RGB565;
case DRM_FORMAT_BGR565:
return RDMA_INPUT_FORMAT_RGB565 | FLD_SWAP;
case DRM_FORMAT_RGB888:
return RDMA_INPUT_FORMAT_RGB888;
case DRM_FORMAT_BGR888:
return RDMA_INPUT_FORMAT_RGB888 | FLD_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return RDMA_INPUT_FORMAT_ARGB8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return RDMA_INPUT_FORMAT_RGBA8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP;
case DRM_FORMAT_ABGR2101010:
return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP | RDMA_INPUT_10BIT;
case DRM_FORMAT_ARGB2101010:
return RDMA_INPUT_FORMAT_RGBA8888 | RDMA_INPUT_10BIT;
case DRM_FORMAT_RGBA1010102:
return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP | RDMA_INPUT_10BIT;
case DRM_FORMAT_BGRA1010102:
return RDMA_INPUT_FORMAT_ARGB8888 | RDMA_INPUT_10BIT;
case DRM_FORMAT_UYVY:
return RDMA_INPUT_FORMAT_UYVY;
case DRM_FORMAT_YUYV:
return RDMA_INPUT_FORMAT_YUY2;
}
}
static unsigned int rdma_color_convert(unsigned int color_encoding)
{
switch (color_encoding) {
default:
case DRM_COLOR_YCBCR_BT709:
return RDMA_CSC_FULL709_TO_RGB;
case DRM_COLOR_YCBCR_BT601:
return RDMA_CSC_BT601_TO_RGB;
}
}
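/*
 * Program the GMCIF ultra/pre-ultra request levels and the burst-8 read
 * request type in a single masked write.
 */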
static void mtk_mdp_rdma_fifo_config(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, FLD_EXT_ULTRA_EN | VAL_PRE_ULTRA_EN_ENABLE << 16 |
VAL_ULTRA_EN_ENABLE << 12 | VAL_RD_REQ_TYPE_BURST_8_ACCESS << 4 |
FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV, &priv->cmdq_reg,
priv->regs, MDP_RDMA_GMCIF_CON, FLD_EXT_ULTRA_EN |
FLD_PRE_ULTRA_EN | FLD_ULTRA_EN | FLD_RD_REQ_TYPE |
FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV);
}
void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, FLD_ROT_ENABLE, &priv->cmdq_reg,
priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
}
void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg,
priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE);
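/* Pulse the reset register to clear internal state after disabling */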
mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET);
}
void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
struct cmdq_pkt *cmdq_pkt)
{
struct mtk_mdp_rdma *priv = dev_get_drvdata(dev);
const struct drm_format_info *fmt_info = drm_format_info(cfg->fmt);
bool csc_enable = fmt_info->is_yuv;
unsigned int src_pitch_y = cfg->pitch;
unsigned int offset_y = 0;
mtk_mdp_rdma_fifo_config(dev, cmdq_pkt);
mtk_ddp_write_mask(cmdq_pkt, FLD_UNIFORM_CONFIG, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_UNIFORM_CONFIG);
mtk_ddp_write_mask(cmdq_pkt, rdma_fmt_convert(cfg->fmt), &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_SWAP | FLD_SRC_FORMAT | FLD_BIT_NUMBER);
if (!csc_enable && fmt_info->has_alpha)
mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_ARGB, &priv->cmdq_reg,
priv->regs, MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
else
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB);
mtk_ddp_write_mask(cmdq_pkt, cfg->addr0, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_BASE_0, FLD_SRC_BASE_0);
mtk_ddp_write_mask(cmdq_pkt, src_pitch_y, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_BKGD_SIZE_IN_BYTE, FLD_MF_BKGD_WB);
mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_COMP_CON,
FLD_AFBC_YUV_TRANSFORM | FLD_UFBDC_EN | FLD_AFBC_EN);
mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_10B, &priv->cmdq_reg, priv->regs,
MDP_RDMA_CON, FLD_OUTPUT_10B);
mtk_ddp_write_mask(cmdq_pkt, FLD_SIMPLE_MODE, &priv->cmdq_reg, priv->regs,
MDP_RDMA_CON, FLD_SIMPLE_MODE);
if (csc_enable)
mtk_ddp_write_mask(cmdq_pkt, rdma_color_convert(cfg->color_encoding) << 23,
&priv->cmdq_reg, priv->regs, MDP_RDMA_TRANSFORM_0,
FLD_INT_MATRIX_SEL);
mtk_ddp_write_mask(cmdq_pkt, csc_enable << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_TRANSFORM_0, FLD_TRANS_EN);
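/* Byte offset of the (x_left, y_top) origin within source plane 0 */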
offset_y = cfg->x_left * fmt_info->cpp[0] + cfg->y_top * src_pitch_y;
mtk_ddp_write_mask(cmdq_pkt, offset_y, &priv->cmdq_reg, priv->regs,
MDP_RDMA_SRC_OFFSET_0, FLD_SRC_OFFSET_0);
mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_W);
mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_H);
mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_W);
mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs,
MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_H);
}
int mtk_mdp_rdma_clk_enable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
return clk_prepare_enable(rdma->clk);
}
void mtk_mdp_rdma_clk_disable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
clk_disable_unprepare(rdma->clk);
}
static int mtk_mdp_rdma_bind(struct device *dev, struct device *master,
void *data)
{
return 0;
}
static void mtk_mdp_rdma_unbind(struct device *dev, struct device *master,
void *data)
{
}
static const struct component_ops mtk_mdp_rdma_component_ops = {
.bind = mtk_mdp_rdma_bind,
.unbind = mtk_mdp_rdma_unbind,
};
static int mtk_mdp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mtk_mdp_rdma *priv;
int ret = 0;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap rdma\n");
return PTR_ERR(priv->regs);
}
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get rdma clk\n");
return PTR_ERR(priv->clk);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
if (ret)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
platform_set_drvdata(pdev, priv);
pm_runtime_enable(dev);
ret = component_add(dev, &mtk_mdp_rdma_component_ops);
if (ret != 0) {
pm_runtime_disable(dev);
dev_err(dev, "Failed to add component: %d\n", ret);
}
return ret;
}
static int mtk_mdp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_mdp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt8195-vdo1-rdma", },
{},
};
MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
struct platform_driver mtk_mdp_rdma_driver = {
.probe = mtk_mdp_rdma_probe,
.remove = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
.owner = THIS_MODULE,
.of_match_table = mtk_mdp_rdma_driver_dt_match,
},
};

View File

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 MediaTek Inc.
*/
#ifndef __MTK_MDP_RDMA_H__
#define __MTK_MDP_RDMA_H__
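/*
* Parameters for a single RDMA read job: fmt is a DRM_FORMAT_* fourcc,
* color_encoding is a DRM_COLOR_YCBCR_* value, and addr0/pitch describe
* plane 0 of the source buffer.
*/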
struct mtk_mdp_rdma_cfg {
unsigned int pitch;
unsigned int addr0;
unsigned int width;
unsigned int height;
unsigned int x_left;
unsigned int y_top;
int fmt;
int color_encoding;
};
#endif // __MTK_MDP_RDMA_H__

View File

@@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
static int mgag200_g200_init_pci_options(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
bool has_sgram;
u32 option;
int err;
err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
if (err != PCIBIOS_SUCCESSFUL) {
dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
return pcibios_err_to_errno(err);
}
has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK);
if (has_sgram)
option = 0x4049cd21;
else
option = 0x40499121;
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
/*
* DRM Device
*/
static const struct mgag200_device_info mgag200_g200_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 3, false);
static void mgag200_g200_interpret_bios(struct mgag200_g200_device *g200,
const unsigned char *bios, size_t size)
{
static const char matrox[] = {'M', 'A', 'T', 'R', 'O', 'X'};
static const unsigned int expected_length[6] = {
0, 64, 64, 64, 128, 128
};
struct mga_device *mdev = &g200->base;
struct drm_device *dev = &mdev->base;
const unsigned char *pins;
unsigned int pins_len, version;
int offset;
int tmp;
/* Test for MATROX string. */
if (size < 45 + sizeof(matrox))
return;
if (memcmp(&bios[45], matrox, sizeof(matrox)) != 0)
return;
/* Get the PInS offset. */
if (size < MGA_BIOS_OFFSET + 2)
return;
offset = (bios[MGA_BIOS_OFFSET + 1] << 8) | bios[MGA_BIOS_OFFSET];
/* Get PInS data structure. */
if (size < offset + 6)
return;
pins = bios + offset;
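/*
* Newer PInS blocks start with the 0x2e 0x41 signature and carry their
* version in byte 5; anything else uses the version-1 layout.
*/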
if (pins[0] == 0x2e && pins[1] == 0x41) {
version = pins[5];
pins_len = pins[2];
} else {
version = 1;
pins_len = pins[0] + (pins[1] << 8);
}
if (version < 1 || version > 5) {
drm_warn(dev, "Unknown BIOS PInS version: %d\n", version);
return;
}
if (pins_len != expected_length[version]) {
drm_warn(dev, "Unexpected BIOS PInS size: %d expected: %d\n",
pins_len, expected_length[version]);
return;
}
if (size < offset + pins_len)
return;
drm_dbg_kms(dev, "MATROX BIOS PInS version %d size: %d found\n", version, pins_len);
/* Extract the clock values */
switch (version) {
case 1:
tmp = pins[24] + (pins[25] << 8);
if (tmp)
g200->pclk_max = tmp * 10;
break;
case 2:
if (pins[41] != 0xff)
g200->pclk_max = (pins[41] + 100) * 1000;
break;
case 3:
if (pins[36] != 0xff)
g200->pclk_max = (pins[36] + 100) * 1000;
if (pins[52] & 0x20)
g200->ref_clk = 14318;
break;
case 4:
if (pins[39] != 0xff)
g200->pclk_max = pins[39] * 4 * 1000;
if (pins[92] & 0x01)
g200->ref_clk = 14318;
break;
case 5:
tmp = pins[4] ? 8000 : 6000;
if (pins[123] != 0xff)
g200->pclk_min = pins[123] * tmp;
if (pins[38] != 0xff)
g200->pclk_max = pins[38] * tmp;
if (pins[110] & 0x01)
g200->ref_clk = 14318;
break;
default:
break;
}
}
static void mgag200_g200_init_refclk(struct mgag200_g200_device *g200)
{
struct mga_device *mdev = &g200->base;
struct drm_device *dev = &mdev->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned char __iomem *rom;
unsigned char *bios;
size_t size;
g200->pclk_min = 50000;
g200->pclk_max = 230000;
g200->ref_clk = 27050;
rom = pci_map_rom(pdev, &size);
if (!rom)
return;
bios = vmalloc(size);
if (!bios)
goto out;
memcpy_fromio(bios, rom, size);
if (size >= 2 && bios[0] == 0x55 && bios[1] == 0xaa)
mgag200_g200_interpret_bios(g200, bios, size);
drm_dbg_kms(dev, "pclk_min: %ld pclk_max: %ld ref_clk: %ld\n",
g200->pclk_min, g200->pclk_max, g200->ref_clk);
vfree(bios);
out:
pci_unmap_rom(pdev, rom);
}
struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mgag200_g200_device *g200;
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
g200 = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200_device, base.base);
if (IS_ERR(g200))
return ERR_CAST(g200);
mdev = &g200->base;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_g200_init_pci_options(pdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
mgag200_g200_init_refclk(g200);
ret = mgag200_device_init(mdev, type, &mgag200_g200_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200eh_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 37500, false, 1, 0, false);
struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200eh_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200eh3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200eh3_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200er_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 1, 0, false);
struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200er_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200ev_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 32700, false, 0, 1, false);
struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200ev_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,60 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev)
{
resource_size_t vram_size = resource_size(mdev->vram_res);
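/* If at least 16 MiB of VRAM is present, leave out the top 4 MiB */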
if (vram_size >= 0x1000000)
vram_size = vram_size - 0x400000;
return mgag200_probe_vram(mdev->vram, vram_size);
}
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200ew3_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_g200ew3_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,130 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
static int mgag200_g200se_init_pci_options(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
bool has_sgram;
u32 option;
int err;
err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
if (err != PCIBIOS_SUCCESSFUL) {
dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
return pcibios_err_to_errno(err);
}
has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK);
option = 0x40049120;
if (has_sgram)
option |= PCI_MGA_OPTION_HARDPWMSK;
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200se_a_01_device_info =
MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, true);
static const struct mgag200_device_info mgag200_g200se_a_02_device_info =
MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, true);
static const struct mgag200_device_info mgag200_g200se_a_03_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);
static const struct mgag200_device_info mgag200_g200se_b_01_device_info =
MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, false);
static const struct mgag200_device_info mgag200_g200se_b_02_device_info =
MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, false);
static const struct mgag200_device_info mgag200_g200se_b_03_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);
static int mgag200_g200se_init_unique_rev_id(struct mgag200_g200se_device *g200se)
{
struct mga_device *mdev = &g200se->base;
struct drm_device *dev = &mdev->base;
/* stash G200 SE model number for later use */
g200se->unique_rev_id = RREG32(0x1e24);
if (!g200se->unique_rev_id)
return -ENODEV;
drm_dbg(dev, "G200 SE unique revision id is 0x%x\n", g200se->unique_rev_id);
return 0;
}
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mgag200_g200se_device *g200se;
const struct mgag200_device_info *info;
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
g200se = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200se_device, base.base);
if (IS_ERR(g200se))
return ERR_CAST(g200se);
mdev = &g200se->base;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_g200se_init_pci_options(pdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_g200se_init_unique_rev_id(g200se);
if (ret)
return ERR_PTR(ret);
switch (type) {
case G200_SE_A:
if (g200se->unique_rev_id >= 0x03)
info = &mgag200_g200se_a_03_device_info;
else if (g200se->unique_rev_id >= 0x02)
info = &mgag200_g200se_a_02_device_info;
else
info = &mgag200_g200se_a_01_device_info;
break;
case G200_SE_B:
if (g200se->unique_rev_id >= 0x03)
info = &mgag200_g200se_b_03_device_info;
else if (g200se->unique_rev_id >= 0x02)
info = &mgag200_g200se_b_02_device_info;
else
info = &mgag200_g200se_b_01_device_info;
break;
default:
return ERR_PTR(-EINVAL);
}
ret = mgag200_device_init(mdev, type, info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/pci.h>
#include <drm/drm_drv.h>
#include "mgag200_drv.h"
/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
int ret;
mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
if (IS_ERR(mdev))
return mdev;
dev = &mdev->base;
pci_set_drvdata(pdev, dev);
ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_preinit(mdev);
if (ret)
return ERR_PTR(ret);
ret = mgag200_device_init(mdev, type, &mgag200_g200wb_device_info);
if (ret)
return ERR_PTR(ret);
vram_available = mgag200_device_probe_vram(mdev);
ret = mgag200_modeset_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
return mdev;
}

View File

@@ -0,0 +1,752 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <drm/drm_framebuffer.h>
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
#include "dpu_hw_top.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
#include "dpu_vbif.h"
#include "dpu_crtc.h"
#include "disp/msm_disp_snapshot.h"
#define to_dpu_encoder_phys_wb(x) \
container_of(x, struct dpu_encoder_phys_wb, base)
/**
* dpu_encoder_phys_wb_is_master - report wb always as master encoder
* @phys_enc: Pointer to physical encoder
*/
static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
{
/* there is only one physical enc for dpu_writeback */
return true;
}
/**
* dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_ot_limit(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_vbif_set_ot_params ot_params;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = hw_wb->caps->xin_id;
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
ot_params.is_wfd = true;
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
ot_params.rd = false;
dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
}
/**
* dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_qos_remap(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_vbif_set_qos_params qos_params;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
DPU_ERROR("invalid arguments\n");
return;
}
if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
DPU_ERROR("invalid writeback hardware\n");
return;
}
hw_wb = phys_enc->hw_wb;
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
qos_params.num = hw_wb->idx - WB_0;
qos_params.is_rt = false;
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt);
dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
}
/**
* dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_qos_cfg qos_cfg;
const struct dpu_mdss_cfg *catalog;
const struct dpu_qos_lut_tbl *qos_lut_tb;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid parameter(s)\n");
return;
}
catalog = phys_enc->dpu_kms->catalog;
hw_wb = phys_enc->hw_wb;
memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
qos_cfg.danger_safe_en = true;
qos_cfg.danger_lut =
catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
if (hw_wb->ops.setup_qos_lut)
hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
}
/**
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct drm_framebuffer *fb)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
struct dpu_hw_cdp_cfg cdp_cfg;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
return;
}
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
wb_cfg->roi.y1 = 0;
wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
if (hw_wb->ops.setup_roi)
hw_wb->ops.setup_roi(hw_wb, wb_cfg);
if (hw_wb->ops.setup_outformat)
hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
if (hw_wb->ops.setup_cdp) {
memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf->cdp_cfg
[DPU_PERF_CDP_USAGE_NRT].wr_enable;
cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
cdp_cfg.tile_amortize_enable =
DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
DPU_FORMAT_IS_TILE(wb_cfg->dest.format);
cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64;
hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);
}
if (hw_wb->ops.setup_outaddress)
hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
}
/**
* dpu_encoder_phys_wb_setup_cdp - setup the CTL datapath for the writeback block
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *ctl;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
hw_wb = phys_enc->hw_wb;
ctl = phys_enc->hw_ctl;
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
(phys_enc->hw_ctl &&
phys_enc->hw_ctl->ops.setup_intf_cfg)) {
struct dpu_hw_intf_cfg intf_cfg = {0};
struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
enum dpu_3d_blend_mode mode_3d;
mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
mode_3d);
/* setup which pp blk will connect to this wb */
if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, true,
phys_enc->hw_pp->idx);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
} else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
struct dpu_hw_intf_cfg intf_cfg = {0};
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
intf_cfg.mode_3d =
dpu_encoder_helper_get_3d_blend_mode(phys_enc);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
}
}
/**
* dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
* @phys_enc: Pointer to physical encoder
* @crtc_state: Pointer to CRTC atomic state
* @conn_state: Pointer to connector atomic state
*/
static int dpu_encoder_phys_wb_atomic_check(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_framebuffer *fb;
const struct drm_display_mode *mode = &crtc_state->mode;
DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
} else if (conn_state->connector->status !=
connector_status_connected) {
DPU_ERROR("connector not connected %d\n",
conn_state->connector->status);
return -EINVAL;
}
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
fb = conn_state->writeback_job->fb;
DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
fb->width, fb->height);
if (fb->width != mode->hdisplay) {
DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
mode->hdisplay);
return -EINVAL;
} else if (fb->height != mode->vdisplay) {
DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
mode->vdisplay);
return -EINVAL;
} else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, phys_enc->hw_wb->caps->maxlinewidth);
return -EINVAL;
}
return 0;
}
/**
* _dpu_encoder_phys_wb_update_flush - flush hardware update
* @phys_enc: Pointer to physical encoder
*/
static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
u32 pending_flush = 0;
if (!phys_enc)
return;
hw_wb = phys_enc->hw_wb;
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (!hw_ctl) {
DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
return;
}
if (hw_ctl->ops.update_pending_flush_wb)
hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
if (hw_ctl->ops.get_pending_flush)
pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
hw_ctl->idx - CTL_0, pending_flush,
hw_wb->idx - WB_0);
}
/**
* dpu_encoder_phys_wb_setup - setup writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_setup(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct drm_display_mode mode = phys_enc->cached_mode;
struct drm_framebuffer *fb = NULL;
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
mode.hdisplay, mode.vdisplay);
dpu_encoder_phys_wb_set_ot_limit(phys_enc);
dpu_encoder_phys_wb_set_qos_remap(phys_enc);
dpu_encoder_phys_wb_set_qos(phys_enc);
dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
dpu_encoder_phys_wb_setup_cdp(phys_enc);
}
static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
unsigned long lock_flags;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
phys_enc, event);
if (phys_enc->parent_ops->handle_vblank_virt)
phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
if (wb_enc->wb_conn)
drm_writeback_signal_completion(wb_enc->wb_conn, 0);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
}
/**
* dpu_encoder_phys_wb_done_irq - writeback interrupt handler
* @arg: Pointer to writeback encoder
* @irq_idx: interrupt index
*/
static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
{
_dpu_encoder_phys_wb_frame_done_helper(arg);
}
/**
* dpu_encoder_phys_wb_irq_ctrl - irq control of WB
* @phys: Pointer to physical encoder
* @enable: indicates enable or disable interrupts
*/
static void dpu_encoder_phys_wb_irq_ctrl(
struct dpu_encoder_phys *phys, bool enable)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
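/* Register the WB_DONE irq callback on the first enable and unregister it on the last disable */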
if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
dpu_core_irq_register_callback(phys->dpu_kms,
phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
else if (!enable &&
atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
}
static void dpu_encoder_phys_wb_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
}
static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
wb_enc->wb_done_timeout_cnt++;
if (wb_enc->wb_done_timeout_cnt == 1)
msm_disp_snapshot_state(phys_enc->parent->dev);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
/* request a ctl reset before the next kickoff */
phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
if (wb_enc->wb_conn)
drm_writeback_signal_completion(wb_enc->wb_conn, 0);
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
phys_enc->parent, phys_enc, frame_event);
}
/**
* dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
* @phys_enc: Pointer to physical encoder
*/
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE,
dpu_encoder_phys_wb_done_irq, &wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
else if (!ret)
wb_enc->wb_done_timeout_cnt = 0;
return ret;
}
/**
* dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct drm_connector *drm_conn;
struct drm_connector_state *state;
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
if (!wb_enc->wb_conn || !wb_enc->wb_job) {
DPU_ERROR("invalid wb_conn or wb_job\n");
return;
}
drm_conn = &wb_enc->wb_conn->base;
state = drm_conn->state;
drm_writeback_queue_job(wb_enc->wb_conn, state);
dpu_encoder_phys_wb_setup(phys_enc);
_dpu_encoder_phys_wb_update_flush(phys_enc);
}
/**
* dpu_encoder_phys_wb_needs_single_flush - trigger flush processing
* @phys_enc: Pointer to physical encoder
*/
static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
return false;
}
/**
* dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
}
/**
* dpu_encoder_phys_wb_enable - enable writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
{
DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
/**
* dpu_encoder_phys_wb_disable - disable writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR("encoder is already disabled\n");
return;
}
/* reset h/w before final flush */
if (phys_enc->hw_ctl->ops.clear_pending_flush)
phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
/*
* New CTL reset sequence from 5.0 MDP onwards.
* If has_3d_merge_reset is not set, the legacy reset
* sequence is executed.
*
* The legacy reset sequence has not been implemented yet:
* any target earlier than SM8150 will need it, so the
* legacy teardown sequence must be added when WB support
* is extended to those targets.
*/
if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
dpu_encoder_helper_phys_cleanup(phys_enc);
phys_enc->enable_state = DPU_ENC_DISABLED;
}
/**
* dpu_encoder_phys_wb_destroy - destroy writeback encoder
* @phys_enc: Pointer to physical encoder
*/
static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
kfree(phys_enc);
}
static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
const struct msm_format *format;
struct msm_gem_address_space *aspace;
struct dpu_hw_wb_cfg *wb_cfg;
int ret;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
if (!job->fb)
return;
wb_enc->wb_job = job;
wb_enc->wb_conn = job->connector;
aspace = phys_enc->dpu_kms->base.aspace;
wb_cfg = &wb_enc->wb_cfg;
memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
ret = msm_framebuffer_prepare(job->fb, aspace, false);
if (ret) {
DPU_ERROR("prep fb failed, %d\n", ret);
return;
}
format = msm_framebuffer_format(job->fb);
wb_cfg->dest.format = dpu_get_dpu_format_ext(
format->pixel_format, job->fb->modifier);
if (!wb_cfg->dest.format) {
/* this error should be detected during atomic_check */
DPU_ERROR("failed to get format %x\n", format->pixel_format);
return;
}
ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
if (ret) {
DPU_DEBUG("failed to populate layout %d\n", ret);
return;
}
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
(wb_cfg->dest.format->element[0] == C1_B_Cb))
swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
}
static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct msm_gem_address_space *aspace;
if (!job->fb)
return;
aspace = phys_enc->dpu_kms->base.aspace;
msm_framebuffer_cleanup(job->fb, aspace, false);
wb_enc->wb_job = NULL;
wb_enc->wb_conn = NULL;
}
static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
return wb_enc->wb_job != NULL;
}
/**
* dpu_encoder_phys_wb_init_ops - initialize writeback operations
* @ops: Pointer to encoder operation table
*/
static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_wb_is_master;
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
ops->destroy = dpu_encoder_phys_wb_destroy;
ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
ops->trigger_start = dpu_encoder_helper_trigger_start;
ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
}
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
* @p: Pointer to init info structure with initialization params
*/
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_wb *wb_enc = NULL;
int ret = 0;
int i;
DPU_DEBUG("\n");
if (!p || !p->parent) {
DPU_ERROR("invalid params\n");
ret = -EINVAL;
goto fail_alloc;
}
wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
if (!wb_enc) {
DPU_ERROR("failed to allocate wb phys_enc enc\n");
ret = -ENOMEM;
goto fail_alloc;
}
phys_enc = &wb_enc->base;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->wb_idx = p->wb_idx;
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_WB_LINE;
phys_enc->wb_idx = p->wb_idx;
phys_enc->enc_spinlock = p->enc_spinlock;
atomic_set(&wb_enc->wbirq_refcount, 0);
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->vblank_refcount, 0);
wb_enc->wb_done_timeout_cnt = 0;
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
phys_enc->wb_idx);
return phys_enc;
fail_alloc:
return ERR_PTR(ret);
}

View File

@@ -0,0 +1,212 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2022, Linaro Limited
*/
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_dsc.h"
#define DSC_COMMON_MODE 0x000
#define DSC_ENC 0x004
#define DSC_PICTURE 0x008
#define DSC_SLICE 0x00C
#define DSC_CHUNK_SIZE 0x010
#define DSC_DELAY 0x014
#define DSC_SCALE_INITIAL 0x018
#define DSC_SCALE_DEC_INTERVAL 0x01C
#define DSC_SCALE_INC_INTERVAL 0x020
#define DSC_FIRST_LINE_BPG_OFFSET 0x024
#define DSC_BPG_OFFSET 0x028
#define DSC_DSC_OFFSET 0x02C
#define DSC_FLATNESS 0x030
#define DSC_RC_MODEL_SIZE 0x034
#define DSC_RC 0x038
#define DSC_RC_BUF_THRESH 0x03C
#define DSC_RANGE_MIN_QP 0x074
#define DSC_RANGE_MAX_QP 0x0B0
#define DSC_RANGE_BPG_OFFSET 0x0EC
static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
{
struct dpu_hw_blk_reg_map *c = &dsc->hw;
DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 data, lsb, bpp;
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
if (is_cmd_mode)
initial_lines += 1;
slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
data |= dsc->drm->bits_per_pixel << 12;
lsb = dsc->drm->bits_per_pixel % 4;
bpp = dsc->drm->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
data |= (dsc->drm->block_pred_enable << 7);
data |= (dsc->drm->line_buf_depth << 3);
data |= (dsc->drm->simple_422 << 2);
data |= (dsc->drm->convert_rgb << 1);
data |= dsc->drm->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
data = dsc->drm->pic_width << 16;
data |= dsc->drm->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
data = dsc->drm->slice_width << 16;
data |= dsc->drm->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
data = dsc->drm->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
data = dsc->drm->initial_dec_delay << 16;
data |= dsc->drm->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
data = dsc->drm->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
data = dsc->drm->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
data = dsc->drm->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
data = dsc->drm->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
data = dsc->drm->nfl_bpg_offset << 16;
data |= dsc->drm->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
data = dsc->drm->initial_offset << 16;
data |= dsc->drm->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
data = det_thresh_flatness << 10;
data |= dsc->drm->flatness_max_qp << 5;
data |= dsc->drm->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
data = dsc->drm->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
data = dsc->drm->rc_tgt_offset_low << 18;
data |= dsc->drm->rc_tgt_offset_high << 14;
data |= dsc->drm->rc_quant_incr_limit1 << 9;
data |= dsc->drm->rc_quant_incr_limit0 << 4;
data |= dsc->drm->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc)
{
struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
off += 4;
}
off = DSC_RANGE_MIN_QP;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_min_qp);
off += 4;
}
off = DSC_RANGE_MAX_QP;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_max_qp);
off += 4;
}
off = DSC_RANGE_BPG_OFFSET;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
off += 4;
}
}
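/* Look up the DSC block in the catalog and fill in its register mapping */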
static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->dsc_count; i++) {
if (dsc == m->dsc[i].id) {
b->blk_addr = addr + m->dsc[i].base;
b->log_mask = DPU_DBG_MASK_DSC;
return &m->dsc[i];
}
}
return NULL;
}
static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
unsigned long cap)
{
ops->dsc_disable = dpu_hw_dsc_disable;
ops->dsc_config = dpu_hw_dsc_config;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
}
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_dsc *c;
struct dpu_dsc_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _dsc_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
c->idx = idx;
c->caps = cfg;
_setup_dsc_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
{
kfree(dsc);
}

View File

@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020-2022, Linaro Limited */
#ifndef _DPU_HW_DSC_H
#define _DPU_HW_DSC_H
#include <drm/display/drm_dsc.h>
#define DSC_MODE_SPLIT_PANEL BIT(0)
#define DSC_MODE_MULTIPLEX BIT(1)
#define DSC_MODE_VIDEO BIT(2)
struct dpu_hw_dsc;
/**
* struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_dsc_ops {
/**
* dsc_disable - disable dsc
* @hw_dsc: Pointer to dsc context
*/
void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
/**
* dsc_config - configures dsc encoder
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
* @mode: dsc topology mode to be set
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
u32 mode,
u32 initial_lines);
/**
* dsc_config_thresh - programs panel thresholds
* @hw_dsc: Pointer to dsc context
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc);
};
struct dpu_hw_dsc {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* dsc */
enum dpu_dsc idx;
const struct dpu_dsc_cfg *caps;
/* ops */
struct dpu_hw_dsc_ops ops;
};
/**
* dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
* @idx: DSC index for which driver object is required
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
* Returns: Error code or allocated dpu_hw_dsc context
*/
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_dsc_destroy - destroys dsc driver context
* @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
*/
void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_dsc, base);
}
#endif /* _DPU_HW_DSC_H */

View File

@@ -0,0 +1,276 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_wb.h"
#include "dpu_formats.h"
#include "dpu_kms.h"
#define WB_DST_FORMAT 0x000
#define WB_DST_OP_MODE 0x004
#define WB_DST_PACK_PATTERN 0x008
#define WB_DST0_ADDR 0x00C
#define WB_DST1_ADDR 0x010
#define WB_DST2_ADDR 0x014
#define WB_DST3_ADDR 0x018
#define WB_DST_YSTRIDE0 0x01C
#define WB_DST_YSTRIDE1 0x020
#define WB_DST_DITHER_BITDEPTH 0x024
#define WB_DST_MATRIX_ROW0 0x030
#define WB_DST_MATRIX_ROW1 0x034
#define WB_DST_MATRIX_ROW2 0x038
#define WB_DST_MATRIX_ROW3 0x03C
#define WB_DST_WRITE_CONFIG 0x048
#define WB_ROTATION_DNSCALER 0x050
#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
#define WB_N16_INIT_PHASE_X_C03 0x060
#define WB_N16_INIT_PHASE_X_C12 0x064
#define WB_N16_INIT_PHASE_Y_C03 0x068
#define WB_N16_INIT_PHASE_Y_C12 0x06C
#define WB_OUT_SIZE 0x074
#define WB_ALPHA_X_VALUE 0x078
#define WB_DANGER_LUT 0x084
#define WB_SAFE_LUT 0x088
#define WB_QOS_CTRL 0x090
#define WB_CREQ_LUT_0 0x098
#define WB_CREQ_LUT_1 0x09C
#define WB_UBWC_STATIC_CTRL 0x144
#define WB_MUX 0x150
#define WB_CROP_CTRL 0x154
#define WB_CROP_OFFSET 0x158
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4
#define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4
/* WB_QOS_CTRL */
#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
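/* Look up the WB block in the catalog and fill in its register mapping */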
static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
const struct dpu_mdss_cfg *m, void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->wb_count; i++) {
if (wb == m->wb[i].id) {
b->blk_addr = addr + m->wb[i].base;
return &m->wb[i];
}
}
return ERR_PTR(-EINVAL);
}
static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
}
static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *data)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
const struct dpu_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
u32 dst_addr_sw = 0;
chroma_samp = fmt->chroma_sample;
dst_format = (chroma_samp << 23) |
(fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) |
(fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C0_G_Y] << 0);
if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
dst_format |= BIT(8); /* DSTC3_EN */
if (!fmt->alpha_enable ||
!(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
dst_format |= BIT(14); /* DST_ALPHA_X */
}
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
(fmt->element[0] << 0);
dst_format |= (fmt->unpack_align_msb << 18) |
(fmt->unpack_tight << 17) |
((fmt->unpack_count - 1) << 12) |
((fmt->bpp - 1) << 9);
ystride0 = data->dest.plane_pitch[0] |
(data->dest.plane_pitch[1] << 16);
ystride1 = data->dest.plane_pitch[2] |
(data->dest.plane_pitch[3] << 16);
if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
else
outsize = (data->dest.height << 16) | data->dest.width;
DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
}
static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 image_size, out_size, out_xy;
image_size = (wb->dest.height << 16) | wb->dest.width;
out_xy = 0;
out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
}
static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_qos_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 qos_ctrl = 0;
if (!ctx || !cfg)
return;
DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
/*
* For chipsets that do not use DPU_WB_QOS_8LVL but still run the
* DPU driver, such as msm8998, the reset value of WB_CREQ_LUT is
* sufficient for writeback to work; software does not need to
* program a value explicitly.
*/
if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
}
if (cfg->danger_safe_en)
qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
}
static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
struct dpu_hw_cdp_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 cdp_cntl = 0;
if (!ctx || !cfg)
return;
c = &ctx->hw;
if (cfg->enable)
cdp_cntl |= BIT(0);
if (cfg->ubwc_meta_enable)
cdp_cntl |= BIT(1);
if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
cdp_cntl |= BIT(3);
DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
}
static void dpu_hw_wb_bind_pingpong_blk(
struct dpu_hw_wb *ctx,
bool enable, const enum dpu_pingpong pp)
{
struct dpu_hw_blk_reg_map *c;
int mux_cfg;
if (!ctx)
return;
c = &ctx->hw;
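/* The low nibble of WB_MUX selects the pingpong block; 0xf disconnects it */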
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
if (enable)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
DPU_REG_WRITE(c, WB_MUX, mux_cfg);
}
static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
unsigned long features)
{
ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
ops->setup_outformat = dpu_hw_wb_setup_format;
if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
ops->setup_roi = dpu_hw_wb_roi;
if (test_bit(DPU_WB_QOS, &features))
ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
if (test_bit(DPU_WB_CDP, &features))
ops->setup_cdp = dpu_hw_wb_setup_cdp;
if (test_bit(DPU_WB_INPUT_CTRL, &features))
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
}
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
void __iomem *addr, const struct dpu_mdss_cfg *m)
{
struct dpu_hw_wb *c;
const struct dpu_wb_cfg *cfg;
if (!addr || !m)
return ERR_PTR(-EINVAL);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _wb_offset(idx, m, addr, &c->hw);
if (IS_ERR(cfg)) {
WARN(1, "Unable to find wb idx=%d\n", idx);
kfree(c);
return ERR_PTR(-EINVAL);
}
/* Assign ops */
c->mdp = &m->mdp[0];
c->idx = idx;
c->caps = cfg;
_setup_wb_ops(&c->ops, c->caps->features);
return c;
}
void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
{
kfree(hw_wb);
}

View File

@@ -0,0 +1,115 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
#ifndef _DPU_HW_WB_H
#define _DPU_HW_WB_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_top.h"
#include "dpu_hw_util.h"
#include "dpu_hw_pingpong.h"
struct dpu_hw_wb;
struct dpu_hw_wb_cfg {
struct dpu_hw_fmt_layout dest;
enum dpu_intf_mode intf_mode;
struct drm_rect roi;
struct drm_rect crop;
};
/* CDP preload ahead address size */
enum {
DPU_WB_CDP_PRELOAD_AHEAD_32,
DPU_WB_CDP_PRELOAD_AHEAD_64
};
/**
* struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
* @danger_lut: LUT for generate danger level based on fill level
* @safe_lut: LUT for generate safe level based on fill level
* @creq_lut: LUT for generate creq level based on fill level
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_wb_qos_cfg {
u32 danger_lut;
u32 safe_lut;
u64 creq_lut;
bool danger_safe_en;
};
/**
* struct dpu_hw_wb_ops : Interface to the wb hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_outaddress: setup output address from the writeback job
* @setup_outformat: setup output format of writeback block from writeback job
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
* @bind_pingpong_blk: enable/disable the connection with ping-pong block
*/
struct dpu_hw_wb_ops {
void (*setup_outaddress)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_outformat)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_roi)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_qos_cfg *cfg);
void (*setup_cdp)(struct dpu_hw_wb *ctx,
struct dpu_hw_cdp_cfg *cfg);
void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
bool enable, const enum dpu_pingpong pp);
};
/**
* struct dpu_hw_wb : WB driver object
* @hw: block hardware details
* @mdp: pointer to associated mdp portion of the catalog
* @idx: hardware index number within type
* @caps: hardware capabilities
* @ops: function pointers
* @hw_mdp: MDP top level hardware block
*/
struct dpu_hw_wb {
struct dpu_hw_blk_reg_map hw;
const struct dpu_mdp_cfg *mdp;
/* wb path */
int idx;
const struct dpu_wb_cfg *caps;
/* ops */
struct dpu_hw_wb_ops ops;
struct dpu_hw_mdp *hw_mdp;
};
/**
* dpu_hw_wb_init(): Initializes and return writeback hw driver object.
* @idx: wb_path index for which driver object is required
* @addr: mapped register io address of MDP
* @m: pointer to mdss catalog data
*/
struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
/**
* dpu_hw_wb_destroy() - Destroy writeback hw driver object.
* @hw_wb: Pointer to writeback hw driver object
*/
void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
#endif /*_DPU_HW_WB_H */

@@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <drm/drm_edid.h>
#include "dpu_writeback.h"
static int dpu_wb_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
/*
* We should ideally limit the modes only to the max linewidth, but on
* some chipsets this would allow even 4K modes to be added, which then
* fail the per-SSPP bandwidth checks. So, until dual-SSPP and source
* split support are added, limit the modes based on max_mixer_width,
* after which 4K modes can be supported.
*/
return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
dev->mode_config.max_height);
}
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
if (!job->fb)
return 0;
dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
return 0;
}
static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
if (!job->fb)
return;
dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
}
static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
.get_modes = dpu_wb_conn_get_modes,
.prepare_writeback_job = dpu_wb_conn_prepare_job,
.cleanup_writeback_job = dpu_wb_conn_cleanup_job,
};
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
const u32 *format_list, u32 num_formats)
{
struct dpu_wb_connector *dpu_wb_conn;
int rc = 0;
dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
if (!dpu_wb_conn)
return -ENOMEM;
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
/*
* DPU initializes the encoder and sets it up completely for writeback
* cases, hence it should use the new API
* drm_writeback_connector_init_with_encoder() to initialize the
* writeback connector.
*/
rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
&dpu_wb_conn_funcs, format_list, num_formats);
if (!rc)
dpu_wb_conn->wb_enc = enc;
return rc;
}
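/*
* A minimal sketch of the expected call site (hypothetical, for
* illustration): the DPU encoder setup path creates a fully initialized
* writeback encoder first and then hands it to this helper together with
* the WB format table from the catalog:
*
*	rc = dpu_writeback_init(dev, encoder, wb_formats, num_formats);
*	if (rc)
*		return rc;
*/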

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_WRITEBACK_H
#define _DPU_WRITEBACK_H
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_encoder_phys.h"
struct dpu_wb_connector {
struct drm_writeback_connector base;
struct drm_encoder *wb_enc;
};
static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
{
return container_of(conn, struct dpu_wb_connector, base);
}
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
const u32 *format_list, u32 num_formats);
#endif /*_DPU_WRITEBACK_H */

@@ -0,0 +1,472 @@
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "msm_drv.h"
#include "msm_kms.h"
/* for DPU_HW_* defines */
#include "disp/dpu1/dpu_hw_catalog.h"
#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
#define UBWC_STATIC 0x144
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154
#define MIN_IB_BW	400000000UL /* Min ib vote 400 MB/s */
struct msm_mdss {
struct device *dev;
void __iomem *mmio;
struct clk_bulk_data *clocks;
size_t num_clocks;
bool is_mdp5;
struct {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
struct icc_path *path[2];
u32 num_paths;
};
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
struct msm_mdss *msm_mdss)
{
struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
msm_mdss->path[0] = path0;
msm_mdss->num_paths = 1;
if (!IS_ERR_OR_NULL(path1)) {
msm_mdss->path[1] = path1;
msm_mdss->num_paths++;
}
return 0;
}
static void msm_mdss_put_icc_path(void *data)
{
struct msm_mdss *msm_mdss = data;
int i;
for (i = 0; i < msm_mdss->num_paths; i++)
icc_put(msm_mdss->path[i]);
}
static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
{
int i;
for (i = 0; i < msm_mdss->num_paths; i++)
icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
}
static void msm_mdss_irq(struct irq_desc *desc)
{
struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts;
chained_irq_enter(chip, desc);
interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
int rc;
rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
hwirq);
if (rc < 0) {
dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
hwirq, rc);
break;
}
interrupts &= ~(1 << hwirq);
}
chained_irq_exit(chip, desc);
}
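/*
* Worked example of the dispatch loop above, assuming HW_INTR_STATUS reads
* back 0x11 (bits 0 and 4 pending): fls(0x11) - 1 = 4, so hwirq 4 is handled
* first and bit 4 is cleared from the local mask, then fls(0x01) - 1 = 0
* dispatches hwirq 0, and the loop exits once the mask is empty. Pending
* sources are therefore serviced from the highest bit downwards.
*/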
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static struct irq_chip msm_mdss_irq_chip = {
.name = "msm_mdss",
.irq_mask = msm_mdss_irq_mask,
.irq_unmask = msm_mdss_irq_unmask,
};
static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
static int msm_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct msm_mdss *msm_mdss = domain->host_data;
irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);
return irq_set_chip_data(irq, msm_mdss);
}
static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
.map = msm_mdss_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
struct device *dev;
struct irq_domain *domain;
dev = msm_mdss->dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
return -EINVAL;
}
msm_mdss->irq_controller.enabled_mask = 0;
msm_mdss->irq_controller.domain = domain;
return 0;
}
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
/*
* Several components have AXI clocks that can only be turned on if
* the interconnect is enabled (non-zero bandwidth). Make sure the
* interconnects request at least a minimum amount of bandwidth.
*/
msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
return ret;
}
/*
* HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on
* mdp5 hardware. Skip reading it for now.
*/
if (msm_mdss->is_mdp5)
return 0;
/*
* The UBWC config is part of the "mdss" region, which is not accessible
* from the rest of the driver, so hardcode the known configurations here.
*/
switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
case DPU_HW_VER_500:
case DPU_HW_VER_501:
writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
break;
case DPU_HW_VER_600:
/* TODO: 0x102e for LP_DDR4 */
writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
break;
case DPU_HW_VER_620:
writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
break;
case DPU_HW_VER_720:
writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
break;
}
return ret;
}
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
msm_mdss_icc_request_bw(msm_mdss, 0);
return 0;
}
static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
struct platform_device *pdev = to_platform_device(msm_mdss->dev);
int irq;
pm_runtime_suspend(msm_mdss->dev);
pm_runtime_disable(msm_mdss->dev);
irq_domain_remove(msm_mdss->irq_controller.domain);
msm_mdss->irq_controller.domain = NULL;
irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL);
}
static int msm_mdss_reset(struct device *dev)
{
struct reset_control *reset;
reset = reset_control_get_optional_exclusive(dev, NULL);
if (!reset) {
/* Optional reset not specified */
return 0;
} else if (IS_ERR(reset)) {
return dev_err_probe(dev, PTR_ERR(reset),
"failed to acquire mdss reset\n");
}
reset_control_assert(reset);
/*
* Tests indicate that the reset has to be held for some period of time;
* hold it for about one frame time on a typical system.
*/
msleep(20);
reset_control_deassert(reset);
reset_control_put(reset);
return 0;
}
/*
* MDP5 MDSS uses at most three specified clocks.
*/
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
struct clk_bulk_data *bulk;
int num_clocks = 0;
int ret;
if (!pdev)
return -EINVAL;
bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
if (!bulk)
return -ENOMEM;
bulk[num_clocks++].id = "iface";
bulk[num_clocks++].id = "bus";
bulk[num_clocks++].id = "vsync";
ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
if (ret)
return ret;
*clocks = bulk;
return num_clocks;
}
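/*
* On success the return value is the number of entries filled in *clocks,
* which is exactly what the clk_bulk API expects; a hypothetical caller
* sketch:
*
*	struct clk_bulk_data *clocks;
*	int num = mdp5_mdss_parse_clock(pdev, &clocks);
*
*	if (num < 0)
*		return num;
*	ret = clk_bulk_prepare_enable(num, clocks);
*/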
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
struct msm_mdss *msm_mdss;
int ret;
int irq;
ret = msm_mdss_reset(&pdev->dev);
if (ret)
return ERR_PTR(ret);
msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
return ERR_PTR(ret);
ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
if (ret)
return ERR_PTR(ret);
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
else
ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
return ERR_PTR(ret);
}
msm_mdss->num_clocks = ret;
msm_mdss->is_mdp5 = is_mdp5;
msm_mdss->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return ERR_PTR(irq);
ret = _msm_mdss_irq_domain_add(msm_mdss);
if (ret)
return ERR_PTR(ret);
irq_set_chained_handler_and_data(irq, msm_mdss_irq,
msm_mdss);
pm_runtime_enable(&pdev->dev);
return msm_mdss;
}
static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
struct msm_mdss *mdss = dev_get_drvdata(dev);
DBG("");
return msm_mdss_disable(mdss);
}
static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
struct msm_mdss *mdss = dev_get_drvdata(dev);
DBG("");
return msm_mdss_enable(mdss);
}
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 0;
return mdss_runtime_suspend(dev);
}
static int __maybe_unused mdss_pm_resume(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 0;
return mdss_runtime_resume(dev);
}
static const struct dev_pm_ops mdss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};
static int mdss_probe(struct platform_device *pdev)
{
struct msm_mdss *mdss;
bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
struct device *dev = &pdev->dev;
int ret;
mdss = msm_mdss_init(pdev, is_mdp5);
if (IS_ERR(mdss))
return PTR_ERR(mdss);
platform_set_drvdata(pdev, mdss);
/*
* MDP5/DPU based devices don't have a flat hierarchy. There is a top
* level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
* Populate the children devices, find the MDP5/DPU node, and then add
* the interfaces to our components list.
*/
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
msm_mdss_destroy(mdss);
return ret;
}
return 0;
}
static int mdss_remove(struct platform_device *pdev)
{
struct msm_mdss *mdss = platform_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
msm_mdss_destroy(mdss);
return 0;
}
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
static struct platform_driver mdss_platform_driver = {
.probe = mdss_probe,
.remove = mdss_remove,
.driver = {
.name = "msm-mdss",
.of_match_table = mdss_dt_match,
.pm = &mdss_pm_ops,
},
};
void __init msm_mdss_register(void)
{
platform_driver_register(&mdss_platform_driver);
}
void __exit msm_mdss_unregister(void)
{
platform_driver_unregister(&mdss_platform_driver);
}

@@ -0,0 +1,340 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*
* This code is based on drivers/gpu/drm/mxsfb/mxsfb*
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "lcdif_drv.h"
#include "lcdif_regs.h"
static const struct drm_mode_config_funcs lcdif_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const struct drm_mode_config_helper_funcs lcdif_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif)
{
struct drm_device *drm = lcdif->drm;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel,
&bridge);
if (ret)
return ret;
if (panel) {
bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
if (!bridge)
return -ENODEV;
ret = drm_bridge_attach(&lcdif->encoder, bridge, NULL, 0);
if (ret)
return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");
lcdif->bridge = bridge;
return 0;
}
static irqreturn_t lcdif_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
struct lcdif_drm_private *lcdif = drm->dev_private;
u32 reg, stat;
stat = readl(lcdif->base + LCDC_V8_INT_STATUS_D0);
if (!stat)
return IRQ_NONE;
if (stat & INT_STATUS_D0_VS_BLANK) {
reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
if (!(reg & CTRLDESCL0_5_SHADOW_LOAD_EN))
drm_crtc_handle_vblank(&lcdif->crtc);
}
writel(stat, lcdif->base + LCDC_V8_INT_STATUS_D0);
return IRQ_HANDLED;
}
static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
if (!lcdif)
return -ENOMEM;
lcdif->drm = drm;
drm->dev_private = lcdif;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lcdif->base = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
lcdif->clk = devm_clk_get(drm->dev, "pix");
if (IS_ERR(lcdif->clk))
return PTR_ERR(lcdif->clk);
lcdif->clk_axi = devm_clk_get(drm->dev, "axi");
if (IS_ERR(lcdif->clk_axi))
return PTR_ERR(lcdif->clk_axi);
lcdif->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
if (IS_ERR(lcdif->clk_disp_axi))
return PTR_ERR(lcdif->clk_disp_axi);
platform_set_drvdata(pdev, drm);
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(36));
if (ret)
return ret;
/* Modeset init */
drm_mode_config_init(drm);
ret = lcdif_kms_init(lcdif);
if (ret < 0) {
dev_err(drm->dev, "Failed to initialize KMS pipeline\n");
return ret;
}
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
dev_err(drm->dev, "Failed to initialise vblank\n");
return ret;
}
/* Start with vertical blanking interrupt reporting disabled. */
drm_crtc_vblank_off(&lcdif->crtc);
ret = lcdif_attach_bridge(lcdif);
if (ret)
return dev_err_probe(drm->dev, ret, "Cannot connect bridge\n");
drm->mode_config.min_width = LCDIF_MIN_XRES;
drm->mode_config.min_height = LCDIF_MIN_YRES;
drm->mode_config.max_width = LCDIF_MAX_XRES;
drm->mode_config.max_height = LCDIF_MAX_YRES;
drm->mode_config.funcs = &lcdif_mode_config_funcs;
drm->mode_config.helper_private = &lcdif_mode_config_helpers;
drm_mode_config_reset(drm);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
lcdif->irq = ret;
ret = devm_request_irq(drm->dev, lcdif->irq, lcdif_irq_handler, 0,
drm->driver->name, drm);
if (ret < 0) {
dev_err(drm->dev, "Failed to install IRQ handler\n");
return ret;
}
drm_kms_helper_poll_init(drm);
drm_helper_hpd_irq_event(drm);
pm_runtime_enable(drm->dev);
return 0;
}
static void lcdif_unload(struct drm_device *drm)
{
struct lcdif_drm_private *lcdif = drm->dev_private;
pm_runtime_get_sync(drm->dev);
drm_crtc_vblank_off(&lcdif->crtc);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
drm->dev_private = NULL;
}
DEFINE_DRM_GEM_CMA_FOPS(fops);
static const struct drm_driver lcdif_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
.date = "20220417",
.major = 1,
.minor = 0,
};
static const struct of_device_id lcdif_dt_ids[] = {
{ .compatible = "fsl,imx8mp-lcdif" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lcdif_dt_ids);
static int lcdif_probe(struct platform_device *pdev)
{
struct drm_device *drm;
int ret;
drm = drm_dev_alloc(&lcdif_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
ret = lcdif_load(drm);
if (ret)
goto err_free;
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
drm_fbdev_generic_setup(drm, 32);
return 0;
err_unload:
lcdif_unload(drm);
err_free:
drm_dev_put(drm);
return ret;
}
static int lcdif_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
lcdif_unload(drm);
drm_dev_put(drm);
return 0;
}
static void lcdif_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_atomic_helper_shutdown(drm);
}
static int __maybe_unused lcdif_rpm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct lcdif_drm_private *lcdif = drm->dev_private;
/* These clocks supply the DISPLAY CLOCK Domain */
clk_disable_unprepare(lcdif->clk);
/* These clocks supply the System Bus, AXI, Write Path, LFIFO */
clk_disable_unprepare(lcdif->clk_disp_axi);
/* These clocks supply the Control Bus, APB, APBH Ctrl Registers */
clk_disable_unprepare(lcdif->clk_axi);
return 0;
}
static int __maybe_unused lcdif_rpm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct lcdif_drm_private *lcdif = drm->dev_private;
/* These clocks supply the Control Bus, APB, APBH Ctrl Registers */
clk_prepare_enable(lcdif->clk_axi);
/* These clocks supply the System Bus, AXI, Write Path, LFIFO */
clk_prepare_enable(lcdif->clk_disp_axi);
/* These clocks supply the DISPLAY CLOCK Domain */
clk_prepare_enable(lcdif->clk);
return 0;
}
static int __maybe_unused lcdif_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
int ret;
ret = drm_mode_config_helper_suspend(drm);
if (ret)
return ret;
return lcdif_rpm_suspend(dev);
}
static int __maybe_unused lcdif_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
lcdif_rpm_resume(dev);
return drm_mode_config_helper_resume(drm);
}
static const struct dev_pm_ops lcdif_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(lcdif_suspend, lcdif_resume)
SET_RUNTIME_PM_OPS(lcdif_rpm_suspend, lcdif_rpm_resume, NULL)
};
static struct platform_driver lcdif_platform_driver = {
.probe = lcdif_probe,
.remove = lcdif_remove,
.shutdown = lcdif_shutdown,
.driver = {
.name = "imx-lcdif",
.of_match_table = lcdif_dt_ids,
.pm = &lcdif_pm_ops,
},
};
drm_module_platform_driver(lcdif_platform_driver);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale LCDIF DRM/KMS driver");
MODULE_LICENSE("GPL");

@@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*
* i.MX8MP/i.MXRT LCDIFv3 LCD controller driver.
*/
#ifndef __LCDIF_DRV_H__
#define __LCDIF_DRV_H__
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_plane.h>
struct clk;
struct lcdif_drm_private {
void __iomem *base; /* registers */
struct clk *clk;
struct clk *clk_axi;
struct clk *clk_disp_axi;
unsigned int irq;
struct drm_device *drm;
struct {
struct drm_plane primary;
/* i.MXRT does support overlay planes; add them here when needed. */
} planes;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_bridge *bridge;
};
static inline struct lcdif_drm_private *
to_lcdif_drm_private(struct drm_device *drm)
{
return drm->dev_private;
}
int lcdif_kms_init(struct lcdif_drm_private *lcdif);
#endif /* __LCDIF_DRV_H__ */

@@ -0,0 +1,485 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*
* This code is based on drivers/gpu/drm/mxsfb/mxsfb*
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "lcdif_drv.h"
#include "lcdif_regs.h"
/* -----------------------------------------------------------------------------
* CRTC
*/
static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
const u32 bus_format)
{
struct drm_device *drm = lcdif->drm;
const u32 format = lcdif->crtc.primary->state->fb->format->format;
writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
switch (bus_format) {
case MEDIA_BUS_FMT_RGB565_1X16:
writel(DISP_PARA_LINE_PATTERN_RGB565,
lcdif->base + LCDC_V8_DISP_PARA);
break;
case MEDIA_BUS_FMT_RGB888_1X24:
writel(DISP_PARA_LINE_PATTERN_RGB888,
lcdif->base + LCDC_V8_DISP_PARA);
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
writel(DISP_PARA_LINE_PATTERN_UYVY_H,
lcdif->base + LCDC_V8_DISP_PARA);
/* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
lcdif->base + LCDC_V8_CSC0_COEF0);
writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
lcdif->base + LCDC_V8_CSC0_COEF1);
writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
lcdif->base + LCDC_V8_CSC0_COEF2);
writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
lcdif->base + LCDC_V8_CSC0_COEF3);
writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
lcdif->base + LCDC_V8_CSC0_COEF4);
writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
lcdif->base + LCDC_V8_CSC0_COEF5);
writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
lcdif->base + LCDC_V8_CSC0_CTRL);
break;
default:
dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
break;
}
switch (format) {
case DRM_FORMAT_RGB565:
writel(CTRLDESCL0_5_BPP_16_RGB565,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
case DRM_FORMAT_RGB888:
writel(CTRLDESCL0_5_BPP_24_RGB888,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
case DRM_FORMAT_XRGB1555:
writel(CTRLDESCL0_5_BPP_16_ARGB1555,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
case DRM_FORMAT_XRGB4444:
writel(CTRLDESCL0_5_BPP_16_ARGB4444,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
case DRM_FORMAT_XBGR8888:
writel(CTRLDESCL0_5_BPP_32_ABGR8888,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
case DRM_FORMAT_XRGB8888:
writel(CTRLDESCL0_5_BPP_32_ARGB8888,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
default:
dev_err(drm->dev, "Unknown pixel format 0x%x\n", format);
break;
}
}
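/*
* The CSC coefficients above appear to be 11-bit two's-complement values in
* x256 (Q8) fixed point, matching the BT.601 full-range RGB->YCbCr matrix:
* e.g. A1 = 0x04c = 76 ~= 0.299 * 256, A2 = 0x096 = 150 ~= 0.587 * 256 and
* B1 = 0x7d5 = -43 ~= -0.169 * 256, with D2 = D3 = 0x080 = 128 providing the
* chroma offset. This reading is inferred from the values themselves, not
* from documentation.
*/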
static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
{
struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
u32 ctrl = 0;
if (m->flags & DRM_MODE_FLAG_NHSYNC)
ctrl |= CTRL_INV_HS;
if (m->flags & DRM_MODE_FLAG_NVSYNC)
ctrl |= CTRL_INV_VS;
if (bus_flags & DRM_BUS_FLAG_DE_LOW)
ctrl |= CTRL_INV_DE;
if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl |= CTRL_INV_PXCK;
writel(ctrl, lcdif->base + LCDC_V8_CTRL);
writel(DISP_SIZE_DELTA_Y(m->crtc_vdisplay) |
DISP_SIZE_DELTA_X(m->crtc_hdisplay),
lcdif->base + LCDC_V8_DISP_SIZE);
writel(HSYN_PARA_BP_H(m->htotal - m->hsync_end) |
HSYN_PARA_FP_H(m->hsync_start - m->hdisplay),
lcdif->base + LCDC_V8_HSYN_PARA);
writel(VSYN_PARA_BP_V(m->vtotal - m->vsync_end) |
VSYN_PARA_FP_V(m->vsync_start - m->vdisplay),
lcdif->base + LCDC_V8_VSYN_PARA);
writel(VSYN_HSYN_WIDTH_PW_V(m->vsync_end - m->vsync_start) |
VSYN_HSYN_WIDTH_PW_H(m->hsync_end - m->hsync_start),
lcdif->base + LCDC_V8_VSYN_HSYN_WIDTH);
writel(CTRLDESCL0_1_HEIGHT(m->crtc_vdisplay) |
CTRLDESCL0_1_WIDTH(m->crtc_hdisplay),
lcdif->base + LCDC_V8_CTRLDESCL0_1);
writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
lcdif->base + LCDC_V8_CTRLDESCL0_3);
}
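/*
* Worked example of the sync arithmetic above for a hypothetical 1280x800
* mode with hdisplay = 1280, hsync_start = 1310, hsync_end = 1340 and
* htotal = 1440: back porch = htotal - hsync_end = 100 pixels, front
* porch = hsync_start - hdisplay = 30 pixels, pulse width = hsync_end -
* hsync_start = 30 pixels. The vertical fields are computed the same way
* from the v* timings.
*/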
static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
{
u32 reg;
reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
reg |= DISP_PARA_DISP_ON;
writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
reg |= CTRLDESCL0_5_EN;
writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
}
static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
{
u32 reg;
int ret;
reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
reg &= ~CTRLDESCL0_5_EN;
writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
ret = readl_poll_timeout(lcdif->base + LCDC_V8_CTRLDESCL0_5,
reg, !(reg & CTRLDESCL0_5_EN),
0, 36000); /* Wait ~2 frame times max */
if (ret)
drm_err(lcdif->drm, "Failed to disable controller!\n");
reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
reg &= ~DISP_PARA_DISP_ON;
writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
}
static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
{
writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_SET);
readl(lcdif->base + LCDC_V8_CTRL);
writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_CLR);
readl(lcdif->base + LCDC_V8_CTRL);
}
static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
struct drm_bridge_state *bridge_state,
const u32 bus_format)
{
struct drm_device *drm = lcdif->crtc.dev;
struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
u32 bus_flags = 0;
if (lcdif->bridge && lcdif->bridge->timings)
bus_flags = lcdif->bridge->timings->input_bus_flags;
else if (bridge_state)
bus_flags = bridge_state->input_bus_cfg.flags;
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
m->crtc_clock,
(int)(clk_get_rate(lcdif->clk) / 1000));
DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
/* Mandatory eLCDIF reset as per the Reference Manual */
lcdif_reset_block(lcdif);
lcdif_set_formats(lcdif, bus_format);
lcdif_set_mode(lcdif, bus_flags);
}
static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
bool has_primary = crtc_state->plane_mask &
drm_plane_mask(crtc->primary);
/* The primary plane has to be enabled when the CRTC is active. */
if (crtc_state->active && !has_primary)
return -EINVAL;
return drm_atomic_add_affected_planes(state, crtc);
}
static void lcdif_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
struct drm_pending_vblank_event *event;
u32 reg;
reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
reg |= CTRLDESCL0_5_SHADOW_LOAD_EN;
writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
event = crtc->state->event;
crtc->state->event = NULL;
if (!event)
return;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
crtc->primary);
struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
struct drm_bridge_state *bridge_state = NULL;
struct drm_device *drm = lcdif->drm;
u32 bus_format = 0;
dma_addr_t paddr;
/* If there is a bridge attached to the LCDIF, use its bus format */
if (lcdif->bridge) {
bridge_state =
drm_atomic_get_new_bridge_state(state,
lcdif->bridge);
if (!bridge_state)
bus_format = MEDIA_BUS_FMT_FIXED;
else
bus_format = bridge_state->input_bus_cfg.format;
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
"Please fix bridge driver by handling atomic_get_input_bus_fmts.\n");
bus_format = MEDIA_BUS_FMT_RGB888_1X24;
}
}
/* If all else fails, default to RGB888_1X24 */
if (!bus_format)
bus_format = MEDIA_BUS_FMT_RGB888_1X24;
clk_set_rate(lcdif->clk, m->crtc_clock * 1000);
pm_runtime_get_sync(drm->dev);
lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
}
lcdif_enable_controller(lcdif);
drm_crtc_vblank_on(crtc);
}
static void lcdif_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
struct drm_device *drm = lcdif->drm;
struct drm_pending_vblank_event *event;
drm_crtc_vblank_off(crtc);
lcdif_disable_controller(lcdif);
spin_lock_irq(&drm->event_lock);
event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
pm_runtime_put_sync(drm->dev);
}
static int lcdif_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
/* Clear and enable VBLANK IRQ */
writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
writel(INT_ENABLE_D0_VS_BLANK_EN, lcdif->base + LCDC_V8_INT_ENABLE_D0);
return 0;
}
static void lcdif_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
/* Disable and clear VBLANK IRQ */
writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D0);
writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
}
static const struct drm_crtc_helper_funcs lcdif_crtc_helper_funcs = {
.atomic_check = lcdif_crtc_atomic_check,
.atomic_flush = lcdif_crtc_atomic_flush,
.atomic_enable = lcdif_crtc_atomic_enable,
.atomic_disable = lcdif_crtc_atomic_disable,
};
static const struct drm_crtc_funcs lcdif_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = lcdif_crtc_enable_vblank,
.disable_vblank = lcdif_crtc_disable_vblank,
};
/* -----------------------------------------------------------------------------
* Encoder
*/
static const struct drm_encoder_funcs lcdif_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
/* -----------------------------------------------------------------------------
* Planes
*/
static int lcdif_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_new_crtc_state(state,
&lcdif->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
}
static void lcdif_plane_primary_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
plane);
dma_addr_t paddr;
paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
}
}
static bool lcdif_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
return modifier == DRM_FORMAT_MOD_LINEAR;
}
static const struct drm_plane_helper_funcs lcdif_plane_primary_helper_funcs = {
.atomic_check = lcdif_plane_atomic_check,
.atomic_update = lcdif_plane_primary_atomic_update,
};
static const struct drm_plane_funcs lcdif_plane_funcs = {
.format_mod_supported = lcdif_format_mod_supported,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static const u32 lcdif_primary_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
};
static const u64 lcdif_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* -----------------------------------------------------------------------------
* Initialization
*/
int lcdif_kms_init(struct lcdif_drm_private *lcdif)
{
struct drm_encoder *encoder = &lcdif->encoder;
struct drm_crtc *crtc = &lcdif->crtc;
int ret;
drm_plane_helper_add(&lcdif->planes.primary,
&lcdif_plane_primary_helper_funcs);
ret = drm_universal_plane_init(lcdif->drm, &lcdif->planes.primary, 1,
&lcdif_plane_funcs,
lcdif_primary_plane_formats,
ARRAY_SIZE(lcdif_primary_plane_formats),
lcdif_modifiers, DRM_PLANE_TYPE_PRIMARY,
NULL);
if (ret)
return ret;
drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(lcdif->drm, crtc,
&lcdif->planes.primary, NULL,
&lcdif_crtc_funcs, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
return drm_encoder_init(lcdif->drm, encoder, &lcdif_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
}
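/*
* A minimal sketch of the expected usage (mirroring lcdif_load() in
* lcdif_drv.c): the probe path allocates the private structure and then
* builds the plane -> CRTC -> encoder pipeline in a single call:
*
*	ret = lcdif_kms_init(lcdif);
*	if (ret < 0)
*		return ret;
*
* after which the bridge is attached to lcdif->encoder.
*/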

@@ -0,0 +1,257 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*
* i.MX8MP/i.MXRT LCDIF LCD controller driver.
*/
#ifndef __LCDIF_REGS_H__
#define __LCDIF_REGS_H__
#define REG_SET 4
#define REG_CLR 8
/* V8 register set */
#define LCDC_V8_CTRL 0x00
#define LCDC_V8_DISP_PARA 0x10
#define LCDC_V8_DISP_SIZE 0x14
#define LCDC_V8_HSYN_PARA 0x18
#define LCDC_V8_VSYN_PARA 0x1c
#define LCDC_V8_VSYN_HSYN_WIDTH 0x20
#define LCDC_V8_INT_STATUS_D0 0x24
#define LCDC_V8_INT_ENABLE_D0 0x28
#define LCDC_V8_INT_STATUS_D1 0x30
#define LCDC_V8_INT_ENABLE_D1 0x34
#define LCDC_V8_CTRLDESCL0_1 0x200
#define LCDC_V8_CTRLDESCL0_3 0x208
#define LCDC_V8_CTRLDESCL_LOW0_4 0x20c
#define LCDC_V8_CTRLDESCL_HIGH0_4 0x210
#define LCDC_V8_CTRLDESCL0_5 0x214
#define LCDC_V8_CSC0_CTRL 0x21c
#define LCDC_V8_CSC0_COEF0 0x220
#define LCDC_V8_CSC0_COEF1 0x224
#define LCDC_V8_CSC0_COEF2 0x228
#define LCDC_V8_CSC0_COEF3 0x22c
#define LCDC_V8_CSC0_COEF4 0x230
#define LCDC_V8_CSC0_COEF5 0x234
#define LCDC_V8_PANIC0_THRES 0x238
#define CTRL_SFTRST BIT(31)
#define CTRL_CLKGATE BIT(30)
#define CTRL_BYPASS_COUNT BIT(19)
#define CTRL_VSYNC_MODE BIT(18)
#define CTRL_DOTCLK_MODE BIT(17)
#define CTRL_DATA_SELECT BIT(16)
#define CTRL_BUS_WIDTH_16 (0 << 10)
#define CTRL_BUS_WIDTH_8 (1 << 10)
#define CTRL_BUS_WIDTH_18 (2 << 10)
#define CTRL_BUS_WIDTH_24 (3 << 10)
#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
#define CTRL_WORD_LENGTH_16 (0 << 8)
#define CTRL_WORD_LENGTH_8 (1 << 8)
#define CTRL_WORD_LENGTH_18 (2 << 8)
#define CTRL_WORD_LENGTH_24 (3 << 8)
#define CTRL_MASTER BIT(5)
#define CTRL_DF16 BIT(3)
#define CTRL_DF18 BIT(2)
#define CTRL_DF24 BIT(1)
#define CTRL_RUN BIT(0)
#define CTRL1_RECOVER_ON_UNDERFLOW BIT(24)
#define CTRL1_FIFO_CLEAR BIT(21)
#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
#define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13)
#define CTRL1_CUR_FRAME_DONE_IRQ BIT(9)
#define CTRL2_SET_OUTSTANDING_REQS_1 0
#define CTRL2_SET_OUTSTANDING_REQS_2 (0x1 << 21)
#define CTRL2_SET_OUTSTANDING_REQS_4 (0x2 << 21)
#define CTRL2_SET_OUTSTANDING_REQS_8 (0x3 << 21)
#define CTRL2_SET_OUTSTANDING_REQS_16 (0x4 << 21)
#define CTRL2_SET_OUTSTANDING_REQS_MASK (0x7 << 21)
#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
#define VDCTRL0_ENABLE_PRESENT BIT(28)
#define VDCTRL0_VSYNC_ACT_HIGH BIT(27)
#define VDCTRL0_HSYNC_ACT_HIGH BIT(26)
#define VDCTRL0_DOTCLK_ACT_FALLING BIT(25)
#define VDCTRL0_ENABLE_ACT_HIGH BIT(24)
#define VDCTRL0_VSYNC_PERIOD_UNIT BIT(21)
#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT BIT(20)
#define VDCTRL0_HALF_LINE BIT(19)
#define VDCTRL0_HALF_LINE_MODE BIT(18)
#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
#define VDCTRL3_MUX_SYNC_SIGNALS BIT(29)
#define VDCTRL3_VSYNC_ONLY BIT(28)
#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
#define VDCTRL4_SYNC_SIGNALS_ON BIT(18)
#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
#define DEBUG0_HSYNC BIT(26)
#define DEBUG0_VSYNC BIT(25)
#define AS_CTRL_PS_DISABLE BIT(23)
#define AS_CTRL_ALPHA_INVERT BIT(20)
#define AS_CTRL_ALPHA(a) (((a) & 0xff) << 8)
#define AS_CTRL_FORMAT_RGB565 (0xe << 4)
#define AS_CTRL_FORMAT_RGB444 (0xd << 4)
#define AS_CTRL_FORMAT_RGB555 (0xc << 4)
#define AS_CTRL_FORMAT_ARGB4444 (0x9 << 4)
#define AS_CTRL_FORMAT_ARGB1555 (0x8 << 4)
#define AS_CTRL_FORMAT_RGB888 (0x4 << 4)
#define AS_CTRL_FORMAT_ARGB8888 (0x0 << 4)
#define AS_CTRL_ENABLE_COLORKEY BIT(3)
#define AS_CTRL_ALPHA_CTRL_ROP (3 << 1)
#define AS_CTRL_ALPHA_CTRL_MULTIPLY (2 << 1)
#define AS_CTRL_ALPHA_CTRL_OVERRIDE (1 << 1)
#define AS_CTRL_ALPHA_CTRL_EMBEDDED (0 << 1)
#define AS_CTRL_AS_ENABLE BIT(0)
/* V8 register set */
#define CTRL_SW_RESET BIT(31)
#define CTRL_FETCH_START_OPTION_FPV 0
#define CTRL_FETCH_START_OPTION_PWV BIT(8)
#define CTRL_FETCH_START_OPTION_BPV BIT(9)
#define CTRL_FETCH_START_OPTION_RESV GENMASK(9, 8)
#define CTRL_FETCH_START_OPTION_MASK GENMASK(9, 8)
#define CTRL_NEG BIT(4)
#define CTRL_INV_PXCK BIT(3)
#define CTRL_INV_DE BIT(2)
#define CTRL_INV_VS BIT(1)
#define CTRL_INV_HS BIT(0)
#define DISP_PARA_DISP_ON BIT(31)
#define DISP_PARA_SWAP_EN BIT(30)
#define DISP_PARA_LINE_PATTERN_UYVY_H (GENMASK(29, 28) | BIT(26))
#define DISP_PARA_LINE_PATTERN_RGB565 GENMASK(28, 26)
#define DISP_PARA_LINE_PATTERN_RGB888 0
#define DISP_PARA_LINE_PATTERN_MASK GENMASK(29, 26)
#define DISP_PARA_DISP_MODE_MASK GENMASK(25, 24)
#define DISP_PARA_BGND_R_MASK GENMASK(23, 16)
#define DISP_PARA_BGND_G_MASK GENMASK(15, 8)
#define DISP_PARA_BGND_B_MASK GENMASK(7, 0)
#define DISP_SIZE_DELTA_Y(n) (((n) & 0xffff) << 16)
#define DISP_SIZE_DELTA_Y_MASK GENMASK(31, 16)
#define DISP_SIZE_DELTA_X(n) ((n) & 0xffff)
#define DISP_SIZE_DELTA_X_MASK GENMASK(15, 0)
#define HSYN_PARA_BP_H(n) (((n) & 0xffff) << 16)
#define HSYN_PARA_BP_H_MASK GENMASK(31, 16)
#define HSYN_PARA_FP_H(n) ((n) & 0xffff)
#define HSYN_PARA_FP_H_MASK GENMASK(15, 0)
#define VSYN_PARA_BP_V(n) (((n) & 0xffff) << 16)
#define VSYN_PARA_BP_V_MASK GENMASK(31, 16)
#define VSYN_PARA_FP_V(n) ((n) & 0xffff)
#define VSYN_PARA_FP_V_MASK GENMASK(15, 0)
#define VSYN_HSYN_WIDTH_PW_V(n) (((n) & 0xffff) << 16)
#define VSYN_HSYN_WIDTH_PW_V_MASK GENMASK(31, 16)
#define VSYN_HSYN_WIDTH_PW_H(n) ((n) & 0xffff)
#define VSYN_HSYN_WIDTH_PW_H_MASK GENMASK(15, 0)
#define INT_STATUS_D0_FIFO_EMPTY BIT(24)
#define INT_STATUS_D0_DMA_DONE BIT(16)
#define INT_STATUS_D0_DMA_ERR BIT(8)
#define INT_STATUS_D0_VS_BLANK BIT(2)
#define INT_STATUS_D0_UNDERRUN BIT(1)
#define INT_STATUS_D0_VSYNC BIT(0)
#define INT_ENABLE_D0_FIFO_EMPTY_EN BIT(24)
#define INT_ENABLE_D0_DMA_DONE_EN BIT(16)
#define INT_ENABLE_D0_DMA_ERR_EN BIT(8)
#define INT_ENABLE_D0_VS_BLANK_EN BIT(2)
#define INT_ENABLE_D0_UNDERRUN_EN BIT(1)
#define INT_ENABLE_D0_VSYNC_EN BIT(0)
#define INT_STATUS_D1_PLANE_PANIC BIT(0)
#define INT_ENABLE_D1_PLANE_PANIC_EN BIT(0)
#define CTRLDESCL0_1_HEIGHT(n) (((n) & 0xffff) << 16)
#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
#define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
#define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)
#define CTRLDESCL0_3_PITCH(n) ((n) & 0xffff)
#define CTRLDESCL0_3_PITCH_MASK GENMASK(15, 0)
#define CTRLDESCL_HIGH0_4_ADDR_HIGH(n) ((n) & 0xf)
#define CTRLDESCL_HIGH0_4_ADDR_HIGH_MASK GENMASK(3, 0)
#define CTRLDESCL0_5_EN BIT(31)
#define CTRLDESCL0_5_SHADOW_LOAD_EN BIT(30)
#define CTRLDESCL0_5_BPP_16_RGB565 BIT(26)
#define CTRLDESCL0_5_BPP_16_ARGB1555 (BIT(26) | BIT(24))
#define CTRLDESCL0_5_BPP_16_ARGB4444 (BIT(26) | BIT(25))
#define CTRLDESCL0_5_BPP_YCbCr422 (BIT(26) | BIT(25) | BIT(24))
#define CTRLDESCL0_5_BPP_24_RGB888 BIT(27)
#define CTRLDESCL0_5_BPP_32_ARGB8888 (BIT(27) | BIT(24))
#define CTRLDESCL0_5_BPP_32_ABGR8888 (BIT(27) | BIT(25))
#define CTRLDESCL0_5_BPP_MASK GENMASK(27, 24)
#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U 0
#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V BIT(14)
#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 BIT(15)
#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (BIT(15) | BIT(14))
#define CTRLDESCL0_5_YUV_FORMAT_MASK GENMASK(15, 14)
#define CSC0_CTRL_CSC_MODE_RGB2YCbCr GENMASK(2, 1)
#define CSC0_CTRL_CSC_MODE_MASK GENMASK(2, 1)
#define CSC0_CTRL_BYPASS BIT(0)
#define CSC0_COEF0_A2(n) (((n) << 16) & CSC0_COEF0_A2_MASK)
#define CSC0_COEF0_A2_MASK GENMASK(26, 16)
#define CSC0_COEF0_A1(n) ((n) & CSC0_COEF0_A1_MASK)
#define CSC0_COEF0_A1_MASK GENMASK(10, 0)
#define CSC0_COEF1_B1(n) (((n) << 16) & CSC0_COEF1_B1_MASK)
#define CSC0_COEF1_B1_MASK GENMASK(26, 16)
#define CSC0_COEF1_A3(n) ((n) & CSC0_COEF1_A3_MASK)
#define CSC0_COEF1_A3_MASK GENMASK(10, 0)
#define CSC0_COEF2_B3(n) (((n) << 16) & CSC0_COEF2_B3_MASK)
#define CSC0_COEF2_B3_MASK GENMASK(26, 16)
#define CSC0_COEF2_B2(n) ((n) & CSC0_COEF2_B2_MASK)
#define CSC0_COEF2_B2_MASK GENMASK(10, 0)
#define CSC0_COEF3_C2(n) (((n) << 16) & CSC0_COEF3_C2_MASK)
#define CSC0_COEF3_C2_MASK GENMASK(26, 16)
#define CSC0_COEF3_C1(n) ((n) & CSC0_COEF3_C1_MASK)
#define CSC0_COEF3_C1_MASK GENMASK(10, 0)
#define CSC0_COEF4_D1(n) (((n) << 16) & CSC0_COEF4_D1_MASK)
#define CSC0_COEF4_D1_MASK GENMASK(24, 16)
#define CSC0_COEF4_C3(n) ((n) & CSC0_COEF4_C3_MASK)
#define CSC0_COEF4_C3_MASK GENMASK(10, 0)
#define CSC0_COEF5_D3(n) (((n) << 16) & CSC0_COEF5_D3_MASK)
#define CSC0_COEF5_D3_MASK GENMASK(24, 16)
#define CSC0_COEF5_D2(n) ((n) & CSC0_COEF5_D2_MASK)
#define CSC0_COEF5_D2_MASK GENMASK(8, 0)
#define PANIC0_THRES_LOW_MASK GENMASK(24, 16)
#define PANIC0_THRES_HIGH_MASK GENMASK(8, 0)
#define LCDIF_MIN_XRES 120
#define LCDIF_MIN_YRES 120
#define LCDIF_MAX_XRES 0xffff
#define LCDIF_MAX_YRES 0xffff
#endif /* __LCDIF_REGS_H__ */

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_CONN_H__
#define __NVIF_CONN_H__
#include <nvif/object.h>
struct nvif_disp;
struct nvif_conn {
struct nvif_object object;
};
int nvif_conn_ctor(struct nvif_disp *, const char *name, int id, struct nvif_conn *);
void nvif_conn_dtor(struct nvif_conn *);
#define NVIF_CONN_HPD_STATUS_UNSUPPORTED 0 /* negative if query fails */
#define NVIF_CONN_HPD_STATUS_NOT_PRESENT 1
#define NVIF_CONN_HPD_STATUS_PRESENT 2
int nvif_conn_hpd_status(struct nvif_conn *);
#endif

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0010_H__
#define __NVIF_IF0010_H__
union nvif_disp_args {
struct nvif_disp_v0 {
__u8 version;
__u8 pad01[3];
__u32 conn_mask;
__u32 outp_mask;
} v0;
};
#endif

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0011_H__
#define __NVIF_IF0011_H__
union nvif_conn_args {
struct nvif_conn_v0 {
__u8 version;
__u8 id; /* DCB connector table index. */
__u8 pad02[6];
} v0;
};
#define NVIF_CONN_V0_HPD_STATUS 0x00000000
union nvif_conn_hpd_status_args {
struct nvif_conn_hpd_status_v0 {
__u8 version;
__u8 support;
__u8 present;
__u8 pad03[5];
} v0;
};
#endif

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0012_H__
#define __NVIF_IF0012_H__
union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
__u8 id; /* DCB device index. */
__u8 pad02[6];
} v0;
};
#define NVIF_OUTP_V0_LOAD_DETECT 0x00
union nvif_outp_load_detect_args {
struct nvif_outp_load_detect_v0 {
__u8 version;
__u8 load;
__u8 pad02[2];
__u32 data; /* TODO: move vbios loadval parsing into nvkm */
} v0;
};
#endif

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_IF0014_H__
#define __NVIF_IF0014_H__
union nvif_disp_chan_args {
struct nvif_disp_chan_v0 {
__u8 version;
__u8 id;
__u8 pad02[6];
__u64 pushbuf;
} v0;
};
#endif

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_OUTP_H__
#define __NVIF_OUTP_H__
#include <nvif/object.h>
struct nvif_disp;
struct nvif_outp {
struct nvif_object object;
};
int nvif_outp_ctor(struct nvif_disp *, const char *name, int id, struct nvif_outp *);
void nvif_outp_dtor(struct nvif_outp *);
int nvif_outp_load_detect(struct nvif_outp *, u32 loadval);
#endif

@@ -0,0 +1,62 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/conn.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0011.h>
int
nvif_conn_hpd_status(struct nvif_conn *conn)
{
struct nvif_conn_hpd_status_v0 args;
int ret;
args.version = 0;
ret = nvif_mthd(&conn->object, NVIF_CONN_V0_HPD_STATUS, &args, sizeof(args));
NVIF_ERRON(ret, &conn->object, "[HPD_STATUS] support:%d present:%d",
args.support, args.present);
return ret ? ret : !!args.support + !!args.present;
}
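/*
* On success the return value is !!args.support + !!args.present, which
* lines up with the NVIF_CONN_HPD_STATUS_* values in <nvif/conn.h>:
* 0 (UNSUPPORTED) when HPD is not supported, 1 (NOT_PRESENT) when HPD is
* supported but nothing is connected, and 2 (PRESENT) when a connector is
* detected; a negative code means the query itself failed.
*/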
void
nvif_conn_dtor(struct nvif_conn *conn)
{
nvif_object_dtor(&conn->object);
}
int
nvif_conn_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_conn *conn)
{
struct nvif_conn_v0 args;
int ret;
args.version = 0;
args.id = id;
ret = nvif_object_ctor(&disp->object, name ?: "nvifConn", id, NVIF_CLASS_CONN,
&args, sizeof(args), &conn->object);
NVIF_ERRON(ret, &disp->object, "[NEW conn id:%d]", id);
return ret;
}

@@ -0,0 +1,62 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/outp.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0012.h>
int
nvif_outp_load_detect(struct nvif_outp *outp, u32 loadval)
{
struct nvif_outp_load_detect_v0 args;
int ret;
args.version = 0;
args.data = loadval;
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_LOAD_DETECT, &args, sizeof(args));
NVIF_ERRON(ret, &outp->object, "[LOAD_DETECT data:%08x] load:%02x", args.data, args.load);
return ret < 0 ? ret : args.load;
}
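/*
* Illustrative usage (hypothetical caller): analog load detection for a
* DAC-backed output, where "loadval" would typically come from the VBIOS:
*
*	ret = nvif_outp_load_detect(&outp, loadval);
*
* A negative return means the method itself failed; otherwise a non-zero
* return indicates that a display load was detected on the output.
*/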
void
nvif_outp_dtor(struct nvif_outp *outp)
{
nvif_object_dtor(&outp->object);
}
int
nvif_outp_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_outp *outp)
{
struct nvif_outp_v0 args;
int ret;
args.version = 0;
args.id = id;
ret = nvif_object_ctor(&disp->object, name ?: "nvifOutp", id, NVIF_CLASS_OUTP,
&args, sizeof(args), &outp->object);
NVIF_ERRON(ret, &disp->object, "[NEW outp id:%d]", id);
return ret;
}

@@ -0,0 +1,275 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "chan.h"
#include <core/oproxy.h>
#include <core/ramht.h>
#include <nvif/if0014.h>
static int
nvkm_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
u64 size, base = chan->func->user(chan, &size);
*data = nvkm_rd32(device, base + addr);
return 0;
}
static int
nvkm_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
u64 size, base = chan->func->user(chan, &size);
nvkm_wr32(device, base + addr, data);
return 0;
}
static int
nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_disp *disp = chan->disp;
switch (type) {
case 0:
*pevent = &disp->uevent;
return 0;
default:
break;
}
return -EINVAL;
}
static int
nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u64 base = device->func->resource_addr(device, 0);
*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
return 0;
}
struct nvkm_disp_chan_object {
struct nvkm_oproxy oproxy;
struct nvkm_disp *disp;
int hash;
};
static void
nvkm_disp_chan_child_del_(struct nvkm_oproxy *base)
{
struct nvkm_disp_chan_object *object = container_of(base, typeof(*object), oproxy);
nvkm_ramht_remove(object->disp->ramht, object->hash);
}
static const struct nvkm_oproxy_func
nvkm_disp_chan_child_func_ = {
.dtor[0] = nvkm_disp_chan_child_del_,
};
static int
nvkm_disp_chan_child_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(oclass->parent);
struct nvkm_disp *disp = chan->disp;
struct nvkm_device *device = disp->engine.subdev.device;
const struct nvkm_device_oclass *sclass = oclass->priv;
struct nvkm_disp_chan_object *object;
int ret;
if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
nvkm_oproxy_ctor(&nvkm_disp_chan_child_func_, oclass, &object->oproxy);
object->disp = disp;
*pobject = &object->oproxy.base;
ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
if (ret)
return ret;
object->hash = chan->func->bind(chan, object->oproxy.object, oclass->handle);
if (object->hash < 0)
return object->hash;
return 0;
}
static int
nvkm_disp_chan_child_get(struct nvkm_object *object, int index, struct nvkm_oclass *sclass)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
const struct nvkm_device_oclass *oclass = NULL;
if (chan->func->bind)
sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
else
sclass->engine = NULL;
if (sclass->engine && sclass->engine->func->base.sclass) {
sclass->engine->func->base.sclass(sclass, index, &oclass);
if (oclass) {
sclass->ctor = nvkm_disp_chan_child_new;
sclass->priv = oclass;
return 0;
}
}
return -EINVAL;
}
static int
nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
chan->func->fini(chan);
chan->func->intr(chan, false);
return 0;
}
static int
nvkm_disp_chan_init(struct nvkm_object *object)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
chan->func->intr(chan, true);
return chan->func->init(chan);
}
static void *
nvkm_disp_chan_dtor(struct nvkm_object *object)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_disp *disp = chan->disp;
spin_lock(&disp->client.lock);
if (disp->chan[chan->chid.user] == chan)
disp->chan[chan->chid.user] = NULL;
spin_unlock(&disp->client.lock);
nvkm_memory_unref(&chan->memory);
return chan;
}
static const struct nvkm_object_func
nvkm_disp_chan = {
.dtor = nvkm_disp_chan_dtor,
.init = nvkm_disp_chan_init,
.fini = nvkm_disp_chan_fini,
.rd32 = nvkm_disp_chan_rd32,
.wr32 = nvkm_disp_chan_wr32,
.ntfy = nvkm_disp_chan_ntfy,
.map = nvkm_disp_chan_map,
.sclass = nvkm_disp_chan_child_get,
};
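/*
 * Common constructor behind all user-visible display channel classes
 * (core, per-head, window).  The concrete implementation is selected
 * by matching the requested class against disp->func->user[], and the
 * channel claims its user-id slot under disp->client.lock so a given
 * channel can only be owned by one client at a time.
 */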
static int
nvkm_disp_chan_new_(struct nvkm_disp *disp, int nr, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
const struct nvkm_disp_chan_user *user = NULL;
struct nvkm_disp_chan *chan;
union nvif_disp_chan_args *args = argv;
int ret, i;
for (i = 0; disp->func->user[i].ctor; i++) {
if (disp->func->user[i].base.oclass == oclass->base.oclass) {
user = disp->func->user[i].chan;
break;
}
}
if (WARN_ON(!user))
return -EINVAL;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
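/* The id must be in range, and a push buffer must be supplied if and
 * only if this channel type is DMA-driven (i.e. has a ->push hook).
 */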
if (args->v0.id >= nr || !args->v0.pushbuf != !user->func->push)
return -EINVAL;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
*pobject = &chan->object;
nvkm_object_ctor(&nvkm_disp_chan, oclass, &chan->object);
chan->func = user->func;
chan->mthd = user->mthd;
chan->disp = disp;
chan->chid.ctrl = user->ctrl + args->v0.id;
chan->chid.user = user->user + args->v0.id;
chan->head = args->v0.id;
if (chan->func->push) {
ret = chan->func->push(chan, args->v0.pushbuf);
if (ret)
return ret;
}
spin_lock(&disp->client.lock);
if (disp->chan[chan->chid.user]) {
spin_unlock(&disp->client.lock);
return -EBUSY;
}
disp->chan[chan->chid.user] = chan;
spin_unlock(&disp->client.lock);
return 0;
}
int
nvkm_disp_wndw_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, disp->wndw.nr, oclass, argv, argc, pobject);
}
int
nvkm_disp_chan_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, disp->head.nr, oclass, argv, argc, pobject);
}
int
nvkm_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, 1, oclass, argv, argc, pobject);
}
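/*
 * A minimal sketch of what channel creation looks like from the NVIF
 * side (hypothetical handle names; the real client code lives under
 * drm/nouveau/dispnv50/):
 *
 *	struct nvif_disp_chan_v0 args = {
 *		.id = 0,
 *		.pushbuf = pushbuf_handle,	// required for DMA channels
 *	};
 *	ret = nvif_object_ctor(&disp->object, "coreChan", 0,
 *			       GV100_DISP_CORE_CHANNEL_DMA,
 *			       &args, sizeof(args), &chan.object);
 */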


@@ -0,0 +1,135 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_DISP_CHAN_H__
#define __NVKM_DISP_CHAN_H__
#define nvkm_disp_chan(p) container_of((p), struct nvkm_disp_chan, object)
#include <core/object.h>
#include "priv.h"
struct nvkm_disp_chan {
const struct nvkm_disp_chan_func *func;
const struct nvkm_disp_chan_mthd *mthd;
struct nvkm_disp *disp;
struct {
int ctrl;
int user;
} chid;
int head;
struct nvkm_object object;
struct nvkm_memory *memory;
u64 push;
u32 suspend_put;
};
int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
int nvkm_disp_chan_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
int nvkm_disp_wndw_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
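/*
 * Per-generation channel hooks: ->push connects the client-supplied
 * push buffer, ->user returns the BAR0 offset/size of the USER
 * region, and ->bind enters a DMA object into the display RAMHT.
 */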
struct nvkm_disp_chan_func {
int (*push)(struct nvkm_disp_chan *, u64 object);
int (*init)(struct nvkm_disp_chan *);
void (*fini)(struct nvkm_disp_chan *);
void (*intr)(struct nvkm_disp_chan *, bool en);
u64 (*user)(struct nvkm_disp_chan *, u64 *size);
int (*bind)(struct nvkm_disp_chan *, struct nvkm_object *, u32 handle);
};
void nv50_disp_chan_intr(struct nvkm_disp_chan *, bool);
u64 nv50_disp_chan_user(struct nvkm_disp_chan *, u64 *);
extern const struct nvkm_disp_chan_func nv50_disp_pioc_func;
extern const struct nvkm_disp_chan_func nv50_disp_dmac_func;
int nv50_disp_dmac_push(struct nvkm_disp_chan *, u64);
int nv50_disp_dmac_bind(struct nvkm_disp_chan *, struct nvkm_object *, u32);
extern const struct nvkm_disp_chan_func nv50_disp_core_func;
void gf119_disp_chan_intr(struct nvkm_disp_chan *, bool);
extern const struct nvkm_disp_chan_func gf119_disp_pioc_func;
extern const struct nvkm_disp_chan_func gf119_disp_dmac_func;
void gf119_disp_dmac_fini(struct nvkm_disp_chan *);
int gf119_disp_dmac_bind(struct nvkm_disp_chan *, struct nvkm_object *, u32);
extern const struct nvkm_disp_chan_func gf119_disp_core_func;
void gf119_disp_core_fini(struct nvkm_disp_chan *);
extern const struct nvkm_disp_chan_func gp102_disp_dmac_func;
u64 gv100_disp_chan_user(struct nvkm_disp_chan *, u64 *);
int gv100_disp_dmac_init(struct nvkm_disp_chan *);
void gv100_disp_dmac_fini(struct nvkm_disp_chan *);
int gv100_disp_dmac_bind(struct nvkm_disp_chan *, struct nvkm_object *, u32);
struct nvkm_disp_chan_user {
const struct nvkm_disp_chan_func *func;
int ctrl;
int user;
const struct nvkm_disp_chan_mthd *mthd;
};
extern const struct nvkm_disp_chan_user nv50_disp_oimm;
extern const struct nvkm_disp_chan_user nv50_disp_curs;
extern const struct nvkm_disp_chan_user g84_disp_core;
extern const struct nvkm_disp_chan_user g84_disp_base;
extern const struct nvkm_disp_chan_user g84_disp_ovly;
extern const struct nvkm_disp_chan_user g94_disp_core;
extern const struct nvkm_disp_chan_user gt200_disp_ovly;
extern const struct nvkm_disp_chan_user gf119_disp_base;
extern const struct nvkm_disp_chan_user gf119_disp_oimm;
extern const struct nvkm_disp_chan_user gf119_disp_curs;
extern const struct nvkm_disp_chan_user gk104_disp_core;
extern const struct nvkm_disp_chan_user gk104_disp_ovly;
extern const struct nvkm_disp_chan_user gv100_disp_core;
extern const struct nvkm_disp_chan_user gv100_disp_curs;
extern const struct nvkm_disp_chan_user gv100_disp_wndw;
extern const struct nvkm_disp_chan_user gv100_disp_wimm;
struct nvkm_disp_mthd_list {
u32 mthd;
u32 addr;
struct {
u32 mthd;
u32 addr;
const char *name;
} data[];
};
struct nvkm_disp_chan_mthd {
const char *name;
u32 addr;
s32 prev;
struct {
const char *name;
int nr;
const struct nvkm_disp_mthd_list *mthd;
} data[];
};
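/*
 * The mthd tables don't drive any hardware programming; they name the
 * method/register ranges of each channel so nv50_disp_chan_mthd() can
 * dump current channel state when the subdev debug level asks for it.
 */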
void nv50_disp_chan_mthd(struct nvkm_disp_chan *, int debug);
extern const struct nvkm_disp_mthd_list nv50_disp_core_mthd_base;
extern const struct nvkm_disp_mthd_list nv50_disp_core_mthd_sor;
extern const struct nvkm_disp_mthd_list nv50_disp_core_mthd_pior;
extern const struct nvkm_disp_mthd_list nv50_disp_base_mthd_image;
extern const struct nvkm_disp_chan_mthd g84_disp_core_mthd;
extern const struct nvkm_disp_mthd_list g84_disp_core_mthd_dac;
extern const struct nvkm_disp_mthd_list g84_disp_core_mthd_head;
extern const struct nvkm_disp_chan_mthd g94_disp_core_mthd;
extern const struct nvkm_disp_mthd_list gf119_disp_core_mthd_base;
extern const struct nvkm_disp_mthd_list gf119_disp_core_mthd_dac;
extern const struct nvkm_disp_mthd_list gf119_disp_core_mthd_sor;
extern const struct nvkm_disp_mthd_list gf119_disp_core_mthd_pior;
extern const struct nvkm_disp_chan_mthd gf119_disp_base_mthd;
extern const struct nvkm_disp_chan_mthd gk104_disp_core_mthd;
extern const struct nvkm_disp_chan_mthd gk104_disp_ovly_mthd;
#endif


@@ -0,0 +1,117 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uconn(p) container_of((p), struct nvkm_conn, object)
#include "conn.h"
#include <subdev/gpio.h>
#include <nvif/if0011.h>
static int
nvkm_uconn_mthd_hpd_status(struct nvkm_conn *conn, void *argv, u32 argc)
{
struct nvkm_gpio *gpio = conn->disp->engine.subdev.device->gpio;
union nvif_conn_hpd_status_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
args->v0.support = gpio && conn->info.hpd != DCB_GPIO_UNUSED;
args->v0.present = 0;
if (args->v0.support) {
int ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->info.hpd);
if (WARN_ON(ret < 0)) {
args->v0.support = false;
return 0;
}
args->v0.present = ret;
}
return 0;
}
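/*
 * A minimal client-side sketch of the query above (hypothetical
 * variable names, assuming the standard nvif_object_mthd() wrapper):
 *
 *	union nvif_conn_hpd_status_args args = {};
 *	int ret = nvif_object_mthd(&conn->object, NVIF_CONN_V0_HPD_STATUS,
 *				   &args, sizeof(args.v0));
 *	bool plugged = ret == 0 && args.v0.support && args.v0.present;
 */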
static int
nvkm_uconn_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_conn *conn = nvkm_uconn(object);
switch (mthd) {
case NVIF_CONN_V0_HPD_STATUS: return nvkm_uconn_mthd_hpd_status(conn, argv, argc);
default:
break;
}
return -EINVAL;
}
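/*
 * Connectors are board-level objects that outlive any client, so the
 * user object is embedded in nvkm_conn rather than allocated here.
 * "Destruction" just clears the claim taken in nvkm_uconn_new(), and
 * returning NULL stops the object core freeing memory it doesn't own.
 * The udisp and uoutp objects below follow the same pattern.
 */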
static void *
nvkm_uconn_dtor(struct nvkm_object *object)
{
struct nvkm_conn *conn = nvkm_uconn(object);
struct nvkm_disp *disp = conn->disp;
spin_lock(&disp->client.lock);
conn->object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_uconn = {
.dtor = nvkm_uconn_dtor,
.mthd = nvkm_uconn_mthd,
};
int
nvkm_uconn_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct nvkm_conn *cont, *conn = NULL;
union nvif_conn_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
list_for_each_entry(cont, &disp->conns, head) {
if (cont->index == args->v0.id) {
conn = cont;
break;
}
}
if (!conn)
return -EINVAL;
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!conn->object.func) {
nvkm_object_ctor(&nvkm_uconn, oclass, &conn->object);
*pobject = &conn->object;
ret = 0;
}
spin_unlock(&disp->client.lock);
return ret;
}


@@ -0,0 +1,115 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "conn.h"
#include "outp.h"
#include <nvif/class.h>
#include <nvif/if0010.h>
static int
nvkm_udisp_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *sclass)
{
struct nvkm_disp *disp = nvkm_udisp(object);
if (index-- == 0) {
sclass->base = (struct nvkm_sclass) { 0, 0, NVIF_CLASS_CONN };
sclass->ctor = nvkm_uconn_new;
return 0;
}
if (index-- == 0) {
sclass->base = (struct nvkm_sclass) { 0, 0, NVIF_CLASS_OUTP };
sclass->ctor = nvkm_uoutp_new;
return 0;
}
if (disp->func->user[index].ctor) {
sclass->base = disp->func->user[index].base;
sclass->ctor = disp->func->user[index].ctor;
return 0;
}
return -EINVAL;
}
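/*
 * Note the index-- side effects above: indices 0 and 1 are always the
 * connector and output classes, and anything higher lands in the
 * per-generation channel class list (rebased to start at zero).
 */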
static int
nvkm_udisp_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_disp *disp = nvkm_udisp(object);
if (disp->engine.subdev.device->card_type >= NV_50)
return nv50_disp_root_mthd_(object, mthd, argv, argc);
return nv04_disp_mthd(object, mthd, argv, argc);
}
static void *
nvkm_udisp_dtor(struct nvkm_object *object)
{
struct nvkm_disp *disp = nvkm_udisp(object);
spin_lock(&disp->client.lock);
if (object == &disp->client.object)
disp->client.object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_udisp = {
.dtor = nvkm_udisp_dtor,
.mthd = nvkm_udisp_mthd,
.ntfy = nvkm_disp_ntfy,
.sclass = nvkm_udisp_sclass,
};
int
nvkm_udisp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_disp(oclass->engine);
struct nvkm_conn *conn;
struct nvkm_outp *outp;
union nvif_disp_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
spin_lock(&disp->client.lock);
if (disp->client.object.func) {
spin_unlock(&disp->client.lock);
return -EBUSY;
}
nvkm_object_ctor(&nvkm_udisp, oclass, &disp->client.object);
*pobject = &disp->client.object;
spin_unlock(&disp->client.lock);
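/* Report which connector/output indices exist, so the client knows
 * which ids it may use when instantiating NVIF_CLASS_CONN/OUTP
 * children.
 */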
args->v0.conn_mask = 0;
list_for_each_entry(conn, &disp->conns, head)
args->v0.conn_mask |= BIT(conn->index);
args->v0.outp_mask = 0;
list_for_each_entry(outp, &disp->outps, head)
args->v0.outp_mask |= BIT(outp->index);
return 0;
}


@@ -0,0 +1,129 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uoutp(p) container_of((p), struct nvkm_outp, object)
#include "outp.h"
#include "ior.h"
#include <nvif/if0012.h>
static int
nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_load_detect_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
if (ret == 0) {
if (outp->ior->func->sense) {
ret = outp->ior->func->sense(outp->ior, args->v0.data);
args->v0.load = ret < 0 ? 0 : ret;
} else {
ret = -EINVAL;
}
nvkm_outp_release(outp, NVKM_OUTP_PRIV);
}
return ret;
}
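/*
 * Methods that don't require the client to have acquired the output.
 * Returns 1 rather than an errno for methods it doesn't recognise, so
 * the dispatcher below can fall through to other method groups.
 */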
static int
nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
{
switch (mthd) {
case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc);
default:
break;
}
return 1;
}
static int
nvkm_uoutp_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_outp *outp = nvkm_uoutp(object);
struct nvkm_disp *disp = outp->disp;
int ret;
mutex_lock(&disp->super.mutex);
ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc);
if (ret <= 0)
goto done;
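/* Methods requiring an acquired output would be dispatched here;
 * none exist yet, so control currently falls straight through.
 */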
done:
mutex_unlock(&disp->super.mutex);
return ret;
}
static void *
nvkm_uoutp_dtor(struct nvkm_object *object)
{
struct nvkm_outp *outp = nvkm_uoutp(object);
struct nvkm_disp *disp = outp->disp;
spin_lock(&disp->client.lock);
outp->object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_uoutp = {
.dtor = nvkm_uoutp_dtor,
.mthd = nvkm_uoutp_mthd,
};
int
nvkm_uoutp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct nvkm_outp *outt, *outp = NULL;
union nvif_outp_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
list_for_each_entry(outt, &disp->outps, head) {
if (outt->index == args->v0.id) {
outp = outt;
break;
}
}
if (!outp)
return -EINVAL;
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!outp->object.func) {
nvkm_object_ctor(&nvkm_uoutp, oclass, &outp->object);
*pobject = &outp->object;
ret = 0;
}
spin_unlock(&disp->client.lock);
return ret;
}