Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-02-11 17:55:54 +00:00)
T3Q sucks cocks.
This commit is contained in: commit 3b9cff2bef (parent 04c1822c0a)
@@ -202,8 +202,7 @@ struct acpi_device_flags {
	u32 coherent_dma:1;
	u32 cca_seen:1;
	u32 enumeration_by_parent:1;
	u32 honor_deps:1;
	u32 reserved:18;
	u32 reserved:19;
};

/* File System */
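The hunk above trades `honor_deps:1` plus an 18-bit reserved field for a single 19-bit reserved field (the scraped view drops the +/- markers, so only the line counts 8 -> 7 give the direction), leaving the flags word at 32 bits either way. A minimal standalone sketch of that invariant, where `other_bits` is a hypothetical stand-in for the flag bits the hunk does not show:

```c
/* Illustrative only: mirrors the two acpi_device_flags layouts in the hunk,
 * trimmed to the bits it touches plus a stand-in for the rest. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct flags_with_honor_deps {
	uint32_t coherent_dma:1;
	uint32_t cca_seen:1;
	uint32_t enumeration_by_parent:1;
	uint32_t honor_deps:1;
	uint32_t reserved:18;
	uint32_t other_bits:10;	/* hypothetical: the fields not shown in the hunk */
};

struct flags_without_honor_deps {
	uint32_t coherent_dma:1;
	uint32_t cca_seen:1;
	uint32_t enumeration_by_parent:1;
	uint32_t reserved:19;
	uint32_t other_bits:10;	/* hypothetical: the fields not shown in the hunk */
};

/* Both variants still pack into a single 32-bit word on common ABIs. */
static_assert(sizeof(struct flags_with_honor_deps) == sizeof(uint32_t), "fits in u32");
static_assert(sizeof(struct flags_without_honor_deps) == sizeof(uint32_t), "fits in u32");

int main(void)
{
	printf("both layouts occupy %zu bytes\n", sizeof(struct flags_with_honor_deps));
	return 0;
}
```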
@ -279,14 +278,12 @@ struct acpi_device_power {
|
||||
int state; /* Current state */
|
||||
struct acpi_device_power_flags flags;
|
||||
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
|
||||
u8 state_for_enumeration; /* Deepest power state for enumeration */
|
||||
};
|
||||
|
||||
struct acpi_dep_data {
|
||||
struct list_head node;
|
||||
acpi_handle supplier;
|
||||
acpi_handle consumer;
|
||||
bool honor_dep;
|
||||
};
|
||||
|
||||
/* Performance Management */
|
||||
@ -360,7 +357,6 @@ struct acpi_gpio_mapping;
|
||||
|
||||
/* Device */
|
||||
struct acpi_device {
|
||||
u32 pld_crc;
|
||||
int device_type;
|
||||
acpi_handle handle; /* no handle for fixed hardware */
|
||||
struct fwnode_handle fwnode;
|
||||
@ -508,7 +504,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
|
||||
*/
|
||||
|
||||
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
|
||||
struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
|
||||
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
|
||||
unsigned long long *sta);
|
||||
int acpi_bus_get_status(struct acpi_device *device);
|
||||
@ -575,6 +570,7 @@ struct acpi_bus_type {
|
||||
bool (*match)(struct device *dev);
|
||||
struct acpi_device * (*find_companion)(struct device *);
|
||||
void (*setup)(struct device *);
|
||||
void (*cleanup)(struct device *);
|
||||
};
|
||||
int register_acpi_bus_type(struct acpi_bus_type *);
|
||||
int unregister_acpi_bus_type(struct acpi_bus_type *);
|
||||
@ -618,33 +614,12 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev);
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
|
||||
bool acpi_quirk_skip_acpi_ac_and_battery(void);
|
||||
#else
|
||||
static inline bool acpi_device_override_status(struct acpi_device *adev,
|
||||
unsigned long long *status)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
|
||||
bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
|
||||
int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
|
||||
#else
|
||||
static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline int
|
||||
acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
|
||||
{
|
||||
*skip = false;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
@ -719,7 +694,6 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
|
||||
bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
|
||||
|
||||
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
|
||||
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
|
||||
struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier);
|
||||
struct acpi_device *
|
||||
acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
|
||||
|
@ -3,6 +3,7 @@
|
||||
#define __ACPI_NUMA_H
|
||||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/numa.h>
|
||||
|
||||
/* Proximity bitmap length */
|
||||
|
@@ -12,7 +12,7 @@

/* Current ACPICA subsystem version in YYYYMMDD format */

#define ACPI_CA_VERSION 0x20211217
#define ACPI_CA_VERSION 0x20210730

#include <acpi/acconfig.h>
#include <acpi/actypes.h>
|
||||
@@ -454,11 +454,9 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 * ACPI table load/unload interfaces
 */
ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
		acpi_install_table(struct acpi_table_header *table))
		acpi_install_table(acpi_physical_address address,
				   u8 physical))

ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
		acpi_install_physical_table(acpi_physical_address
					    address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
		acpi_load_table(struct acpi_table_header *table,
				u32 *table_idx))
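Paired with the `ACPI_CA_VERSION` hunk above (0x20211217 reverted to 0x20210730), this prototype change is the kind of difference an out-of-tree caller would normally gate on the ACPICA version. A standalone preprocessor sketch, assuming only what the two hunks show; in a real build the define comes from `<acpi/acpixf.h>` rather than being hard-coded:

```c
/* Illustration only: ACPI_CA_VERSION is defined locally so the sketch
 * compiles on its own. */
#include <stdio.h>

#define ACPI_CA_VERSION 0x20210730	/* value this commit reverts to */

int main(void)
{
#if ACPI_CA_VERSION >= 0x20211217
	/* Newer ACPICA (the removed lines): the caller hands over an
	 * already-mapped header, e.g. acpi_install_table(table_header),
	 * and a separate acpi_install_physical_table() takes the address. */
	puts("would call acpi_install_table(struct acpi_table_header *)");
#else
	/* Older ACPICA (restored by this commit): the caller passes a
	 * physical address plus a "physical" flag,
	 * e.g. acpi_install_table(table_pa, TRUE). */
	puts("would call acpi_install_table(acpi_physical_address, u8)");
#endif
	return 0;
}
```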
|
@ -24,7 +24,6 @@
|
||||
* file. Useful because they make it more difficult to inadvertently type in
|
||||
* the wrong signature.
|
||||
*/
|
||||
#define ACPI_SIG_AGDI "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
|
||||
#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
|
||||
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
|
||||
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
|
||||
@ -36,7 +35,6 @@
|
||||
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
|
||||
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
|
||||
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
|
||||
#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
|
||||
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
|
||||
#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
|
||||
#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
|
||||
@ -48,8 +46,8 @@
|
||||
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
|
||||
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
|
||||
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
|
||||
#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
|
||||
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
|
||||
#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
|
||||
|
||||
/*
|
||||
* All tables must be byte-packed to match the ACPI specification, since
|
||||
@ -156,7 +154,7 @@ typedef struct acpi_aest_processor_tlb {
|
||||
/* 2R: Processor Generic Resource Substructure */
|
||||
|
||||
typedef struct acpi_aest_processor_generic {
|
||||
u32 resource;
|
||||
u8 *resource;
|
||||
|
||||
} acpi_aest_processor_generic;
|
||||
|
||||
@ -239,25 +237,6 @@ typedef struct acpi_aest_node_interrupt {
|
||||
#define ACPI_AEST_NODE_ERROR_RECOVERY 1
|
||||
#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */
|
||||
|
||||
/*******************************************************************************
|
||||
* AGDI - Arm Generic Diagnostic Dump and Reset Device Interface
|
||||
*
|
||||
* Conforms to "ACPI for Arm Components 1.1, Platform Design Document"
|
||||
* ARM DEN0093 v1.1
|
||||
*
|
||||
******************************************************************************/
|
||||
struct acpi_table_agdi {
|
||||
struct acpi_table_header header; /* Common ACPI table header */
|
||||
u8 flags;
|
||||
u8 reserved[3];
|
||||
u32 sdei_event;
|
||||
u32 gsiv;
|
||||
};
|
||||
|
||||
/* Mask for Flags field above */
|
||||
|
||||
#define ACPI_AGDI_SIGNALING_MODE (1)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* BDAT - BIOS Data ACPI Table
|
||||
@ -999,7 +978,6 @@ struct acpi_madt_multiproc_wakeup_mailbox {
|
||||
/* MADT Local APIC flags */
|
||||
|
||||
#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
|
||||
#define ACPI_MADT_ONLINE_CAPABLE (2) /* 01: System HW supports enabling processor at runtime */
|
||||
|
||||
/* MADT MPS INTI flags (inti_flags) */
|
||||
|
||||
@ -1431,269 +1409,6 @@ struct nfit_device_handle {
|
||||
#define ACPI_NFIT_GET_NODE_ID(handle) \
|
||||
(((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* NHLT - Non HD Audio Link Table
|
||||
*
|
||||
* Conforms to: Intel Smart Sound Technology NHLT Specification
|
||||
* Version 0.8.1, January 2020.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
/* Main table */
|
||||
|
||||
struct acpi_table_nhlt {
|
||||
struct acpi_table_header header; /* Common ACPI table header */
|
||||
u8 endpoint_count;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_endpoint {
|
||||
u32 descriptor_length;
|
||||
u8 link_type;
|
||||
u8 instance_id;
|
||||
u16 vendor_id;
|
||||
u16 device_id;
|
||||
u16 revision_id;
|
||||
u32 subsystem_id;
|
||||
u8 device_type;
|
||||
u8 direction;
|
||||
u8 virtual_bus_id;
|
||||
};
|
||||
|
||||
/* Types for link_type field above */
|
||||
|
||||
#define ACPI_NHLT_RESERVED_HD_AUDIO 0
|
||||
#define ACPI_NHLT_RESERVED_DSP 1
|
||||
#define ACPI_NHLT_PDM 2
|
||||
#define ACPI_NHLT_SSP 3
|
||||
#define ACPI_NHLT_RESERVED_SLIMBUS 4
|
||||
#define ACPI_NHLT_RESERVED_SOUNDWIRE 5
|
||||
#define ACPI_NHLT_TYPE_RESERVED 6 /* 6 and above are reserved */
|
||||
|
||||
/* All other values above are reserved */
|
||||
|
||||
/* Values for device_id field above */
|
||||
|
||||
#define ACPI_NHLT_PDM_DMIC 0xAE20
|
||||
#define ACPI_NHLT_BT_SIDEBAND 0xAE30
|
||||
#define ACPI_NHLT_I2S_TDM_CODECS 0xAE23
|
||||
|
||||
/* Values for device_type field above */
|
||||
|
||||
/* SSP Link */
|
||||
|
||||
#define ACPI_NHLT_LINK_BT_SIDEBAND 0
|
||||
#define ACPI_NHLT_LINK_FM 1
|
||||
#define ACPI_NHLT_LINK_MODEM 2
|
||||
/* 3 is reserved */
|
||||
#define ACPI_NHLT_LINK_SSP_ANALOG_CODEC 4
|
||||
|
||||
/* PDM Link */
|
||||
|
||||
#define ACPI_NHLT_PDM_ON_CAVS_1P8 0
|
||||
#define ACPI_NHLT_PDM_ON_CAVS_1P5 1
|
||||
|
||||
/* Values for Direction field above */
|
||||
|
||||
#define ACPI_NHLT_DIR_RENDER 0
|
||||
#define ACPI_NHLT_DIR_CAPTURE 1
|
||||
#define ACPI_NHLT_DIR_RENDER_LOOPBACK 2
|
||||
#define ACPI_NHLT_DIR_RENDER_FEEDBACK 3
|
||||
#define ACPI_NHLT_DIR_RESERVED 4 /* 4 and above are reserved */
|
||||
|
||||
struct acpi_nhlt_device_specific_config {
|
||||
u32 capabilities_size;
|
||||
u8 virtual_slot;
|
||||
u8 config_type;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_device_specific_config_a {
|
||||
u32 capabilities_size;
|
||||
u8 virtual_slot;
|
||||
u8 config_type;
|
||||
u8 array_type;
|
||||
};
|
||||
|
||||
/* Values for Config Type above */
|
||||
|
||||
#define ACPI_NHLT_CONFIG_TYPE_GENERIC 0x00
|
||||
#define ACPI_NHLT_CONFIG_TYPE_MIC_ARRAY 0x01
|
||||
#define ACPI_NHLT_CONFIG_TYPE_RENDER_FEEDBACK 0x03
|
||||
#define ACPI_NHLT_CONFIG_TYPE_RESERVED 0x04 /* 4 and above are reserved */
|
||||
|
||||
struct acpi_nhlt_device_specific_config_b {
|
||||
u32 capabilities_size;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_device_specific_config_c {
|
||||
u32 capabilities_size;
|
||||
u8 virtual_slot;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_render_device_specific_config {
|
||||
u32 capabilities_size;
|
||||
u8 virtual_slot;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_wave_extensible {
|
||||
u16 format_tag;
|
||||
u16 channel_count;
|
||||
u32 samples_per_sec;
|
||||
u32 avg_bytes_per_sec;
|
||||
u16 block_align;
|
||||
u16 bits_per_sample;
|
||||
u16 extra_format_size;
|
||||
u16 valid_bits_per_sample;
|
||||
u32 channel_mask;
|
||||
u8 sub_format_guid[16];
|
||||
};
|
||||
|
||||
/* Values for channel_mask above */
|
||||
|
||||
#define ACPI_NHLT_SPKR_FRONT_LEFT 0x1
|
||||
#define ACPI_NHLT_SPKR_FRONT_RIGHT 0x2
|
||||
#define ACPI_NHLT_SPKR_FRONT_CENTER 0x4
|
||||
#define ACPI_NHLT_SPKR_LOW_FREQ 0x8
|
||||
#define ACPI_NHLT_SPKR_BACK_LEFT 0x10
|
||||
#define ACPI_NHLT_SPKR_BACK_RIGHT 0x20
|
||||
#define ACPI_NHLT_SPKR_FRONT_LEFT_OF_CENTER 0x40
|
||||
#define ACPI_NHLT_SPKR_FRONT_RIGHT_OF_CENTER 0x80
|
||||
#define ACPI_NHLT_SPKR_BACK_CENTER 0x100
|
||||
#define ACPI_NHLT_SPKR_SIDE_LEFT 0x200
|
||||
#define ACPI_NHLT_SPKR_SIDE_RIGHT 0x400
|
||||
#define ACPI_NHLT_SPKR_TOP_CENTER 0x800
|
||||
#define ACPI_NHLT_SPKR_TOP_FRONT_LEFT 0x1000
|
||||
#define ACPI_NHLT_SPKR_TOP_FRONT_CENTER 0x2000
|
||||
#define ACPI_NHLT_SPKR_TOP_FRONT_RIGHT 0x4000
|
||||
#define ACPI_NHLT_SPKR_TOP_BACK_LEFT 0x8000
|
||||
#define ACPI_NHLT_SPKR_TOP_BACK_CENTER 0x10000
|
||||
#define ACPI_NHLT_SPKR_TOP_BACK_RIGHT 0x20000
|
||||
|
||||
struct acpi_nhlt_format_config {
|
||||
struct acpi_nhlt_wave_extensible format;
|
||||
u32 capability_size;
|
||||
u8 capabilities[];
|
||||
};
|
||||
|
||||
struct acpi_nhlt_formats_config {
|
||||
u8 formats_count;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_device_specific_hdr {
|
||||
u8 virtual_slot;
|
||||
u8 config_type;
|
||||
};
|
||||
|
||||
/* Types for config_type above */
|
||||
|
||||
#define ACPI_NHLT_GENERIC 0
|
||||
#define ACPI_NHLT_MIC 1
|
||||
#define ACPI_NHLT_RENDER 3
|
||||
|
||||
struct acpi_nhlt_mic_device_specific_config {
|
||||
struct acpi_nhlt_device_specific_hdr device_config;
|
||||
u8 array_type_ext;
|
||||
};
|
||||
|
||||
/* Values for array_type_ext above */
|
||||
|
||||
#define ACPI_NHLT_ARRAY_TYPE_RESERVED 0x09 // 9 and below are reserved
|
||||
#define ACPI_NHLT_SMALL_LINEAR_2ELEMENT 0x0A
|
||||
#define ACPI_NHLT_BIG_LINEAR_2ELEMENT 0x0B
|
||||
#define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT 0x0C
|
||||
#define ACPI_NHLT_PLANAR_LSHAPED_4ELEMENT 0x0D
|
||||
#define ACPI_NHLT_SECOND_GEOMETRY_LINEAR_4ELEMENT 0x0E
|
||||
#define ACPI_NHLT_VENDOR_DEFINED 0x0F
|
||||
#define ACPI_NHLT_ARRAY_TYPE_MASK 0x0F
|
||||
#define ACPI_NHLT_ARRAY_TYPE_EXT_MASK 0x10
|
||||
|
||||
#define ACPI_NHLT_NO_EXTENSION 0x0
|
||||
#define ACPI_NHLT_MIC_SNR_SENSITIVITY_EXT (1<<4)
|
||||
|
||||
struct acpi_nhlt_vendor_mic_count {
|
||||
u8 microphone_count;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_vendor_mic_config {
|
||||
u8 type;
|
||||
u8 panel;
|
||||
u16 speaker_position_distance; // mm
|
||||
u16 horizontal_offset; // mm
|
||||
u16 vertical_offset; // mm
|
||||
u8 frequency_low_band; // 5*hz
|
||||
u8 frequency_high_band; // 500*hz
|
||||
u16 direction_angle; // -180 - + 180
|
||||
u16 elevation_angle; // -180 - + 180
|
||||
u16 work_vertical_angle_begin; // -180 - + 180 with 2 deg step
|
||||
u16 work_vertical_angle_end; // -180 - + 180 with 2 deg step
|
||||
u16 work_horizontal_angle_begin; // -180 - + 180 with 2 deg step
|
||||
u16 work_horizontal_angle_end; // -180 - + 180 with 2 deg step
|
||||
};
|
||||
|
||||
/* Values for Type field above */
|
||||
|
||||
#define ACPI_NHLT_MIC_OMNIDIRECTIONAL 0
|
||||
#define ACPI_NHLT_MIC_SUBCARDIOID 1
|
||||
#define ACPI_NHLT_MIC_CARDIOID 2
|
||||
#define ACPI_NHLT_MIC_SUPER_CARDIOID 3
|
||||
#define ACPI_NHLT_MIC_HYPER_CARDIOID 4
|
||||
#define ACPI_NHLT_MIC_8_SHAPED 5
|
||||
#define ACPI_NHLT_MIC_RESERVED6 6 // 6 is reserved
|
||||
#define ACPI_NHLT_MIC_VENDOR_DEFINED 7
|
||||
#define ACPI_NHLT_MIC_RESERVED 8 // 8 and above are reserved
|
||||
|
||||
/* Values for Panel field above */
|
||||
|
||||
#define ACPI_NHLT_MIC_POSITION_TOP 0
|
||||
#define ACPI_NHLT_MIC_POSITION_BOTTOM 1
|
||||
#define ACPI_NHLT_MIC_POSITION_LEFT 2
|
||||
#define ACPI_NHLT_MIC_POSITION_RIGHT 3
|
||||
#define ACPI_NHLT_MIC_POSITION_FRONT 4
|
||||
#define ACPI_NHLT_MIC_POSITION_BACK 5
|
||||
#define ACPI_NHLT_MIC_POSITION_RESERVED 6 // 6 and above are reserved
|
||||
|
||||
struct acpi_nhlt_vendor_mic_device_specific_config {
|
||||
struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
|
||||
u8 number_of_microphones;
|
||||
struct acpi_nhlt_vendor_mic_config mic_config[]; // indexed by number_of_microphones
|
||||
};
|
||||
|
||||
/* Microphone SNR and Sensitivity extension */
|
||||
|
||||
struct acpi_nhlt_mic_snr_sensitivity_extension {
|
||||
u32 SNR;
|
||||
u32 sensitivity;
|
||||
};
|
||||
|
||||
/* Render device with feedback */
|
||||
|
||||
struct acpi_nhlt_render_feedback_device_specific_config {
|
||||
u8 feedback_virtual_slot; // render slot in case of capture
|
||||
u16 feedback_channels; // informative only
|
||||
u16 feedback_valid_bits_per_sample;
|
||||
};
|
||||
|
||||
/* Linux-specific structures */
|
||||
|
||||
struct acpi_nhlt_linux_specific_count {
|
||||
u8 structure_count;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_linux_specific_data {
|
||||
u8 device_id[16];
|
||||
u8 device_instance_id;
|
||||
u8 device_port_id;
|
||||
};
|
||||
|
||||
struct acpi_nhlt_linux_specific_data_b {
|
||||
u8 specific_data[18];
|
||||
};
|
||||
|
||||
struct acpi_nhlt_table_terminator {
|
||||
u32 terminator_value;
|
||||
u32 terminator_signature;
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* PCCT - Platform Communications Channel Table (ACPI 5.0)
|
||||
@ -2491,22 +2206,6 @@ enum acpi_svkl_format {
|
||||
ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
|
||||
};
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* TDEL - TD-Event Log
|
||||
* From: "Guest-Host-Communication Interface (GHCI) for Intel
|
||||
* Trust Domain Extensions (Intel TDX)".
|
||||
* September 2020
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
struct acpi_table_tdel {
|
||||
struct acpi_table_header header; /* Common ACPI table header */
|
||||
u32 reserved;
|
||||
u64 log_area_minimum_length;
|
||||
u64 log_area_start_address;
|
||||
};
|
||||
|
||||
/* Reset to default packing */
|
||||
|
||||
#pragma pack()
|
||||
|
@ -191,8 +191,7 @@ enum acpi_srat_type {
|
||||
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
|
||||
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
|
||||
ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
|
||||
ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */
|
||||
ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
|
||||
ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */
|
||||
};
|
||||
|
||||
/*
|
||||
@ -273,11 +272,7 @@ struct acpi_srat_gic_its_affinity {
|
||||
u32 its_id;
|
||||
};
|
||||
|
||||
/*
|
||||
* Common structure for SRAT subtable types:
|
||||
* 5: ACPI_SRAT_TYPE_GENERIC_AFFINITY
|
||||
* 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
|
||||
*/
|
||||
/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
|
||||
|
||||
struct acpi_srat_generic_affinity {
|
||||
struct acpi_subtable_header header;
|
||||
|
@ -509,6 +509,7 @@ typedef u64 acpi_integer;
|
||||
#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i))
|
||||
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
|
||||
#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
|
||||
#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
|
||||
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
|
||||
|
||||
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
|
||||
@ -1103,14 +1104,6 @@ struct acpi_connection_info {
|
||||
u8 access_length;
|
||||
};
|
||||
|
||||
/* Special Context data for PCC Opregion (ACPI 6.3) */
|
||||
|
||||
struct acpi_pcc_info {
|
||||
u8 subspace_id;
|
||||
u16 length;
|
||||
u8 *internal_buffer;
|
||||
};
|
||||
|
||||
typedef
|
||||
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
|
||||
u32 function,
|
||||
@ -1228,10 +1221,6 @@ struct acpi_mem_space_context {
|
||||
struct acpi_mem_mapping *first_mm;
|
||||
};
|
||||
|
||||
struct acpi_data_table_space_context {
|
||||
void *pointer;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
|
||||
*/
|
||||
@ -1298,7 +1287,6 @@ typedef enum {
|
||||
#define ACPI_OSI_WIN_10_RS4 0x12
|
||||
#define ACPI_OSI_WIN_10_RS5 0x13
|
||||
#define ACPI_OSI_WIN_10_19H1 0x14
|
||||
#define ACPI_OSI_WIN_10_20H1 0x15
|
||||
|
||||
/* Definitions of getopt */
|
||||
|
||||
|
@ -37,6 +37,9 @@ void __init acpi_hest_init(void);
|
||||
static inline void acpi_hest_init(void) { return; }
|
||||
#endif
|
||||
|
||||
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
|
||||
int apei_hest_parse(apei_hest_func_t func, void *data);
|
||||
|
||||
int erst_write(const struct cper_record_header *record);
|
||||
ssize_t erst_get_record_count(void);
|
||||
int erst_get_record_id_begin(int *pos);
|
||||
|
@ -138,7 +138,6 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
|
||||
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
|
||||
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
|
||||
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
|
||||
extern int cppc_set_enable(int cpu, bool enable);
|
||||
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
|
||||
extern bool acpi_cpc_valid(void);
|
||||
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
|
||||
@ -163,10 +162,6 @@ static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
static inline int cppc_set_enable(int cpu, bool enable)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
|
@@ -9,27 +9,18 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>

struct pcc_mbox_chan {
	struct mbox_chan *mchan;
	u64 shmem_base_addr;
	u64 shmem_size;
	u32 latency;
	u32 max_access_rate;
	u16 min_turnaround_time;
};

#define MAX_PCC_SUBSPACES 256
#ifdef CONFIG_PCC
extern struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
						  int subspace_id);
extern void pcc_mbox_free_channel(struct mbox_chan *chan);
#else
static inline struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
							  int subspace_id)
{
	return ERR_PTR(-ENODEV);
}
static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
#endif

#endif /* _PCC_H */
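The hunk above removes the `struct pcc_mbox_chan` wrapper, which hands a PCC client the shared-memory region and timing parameters directly, and restores the bare `struct mbox_chan` interface. The standalone sketch below only models what a client sees in the two cases: every type and value here is a local stand-in (the wrapper is a trimmed copy of the struct above, `fake_pcct_subspace` is hypothetical), not the kernel API.

```c
/* Standalone model of the two PCC client patterns. */
#include <stdint.h>
#include <stdio.h>

struct mbox_chan { void *con_priv; };	/* stand-in for the raw mailbox channel */

struct pcc_mbox_chan {			/* trimmed copy of the wrapper removed above */
	struct mbox_chan *mchan;
	uint64_t shmem_base_addr;
	uint64_t shmem_size;
	uint32_t latency;
};

/* Hypothetical PCCT subspace data an older client might dig out of con_priv. */
struct fake_pcct_subspace { uint64_t base_address; uint64_t length; };

int main(void)
{
	static struct fake_pcct_subspace ss = { 0xfe000000, 0x1000 };
	static struct mbox_chan raw = { .con_priv = &ss };
	static struct pcc_mbox_chan wrapped = {
		.mchan = &raw, .shmem_base_addr = 0xfe000000, .shmem_size = 0x1000,
	};

	/* Wrapper interface (removed by this commit): parameters come pre-digested. */
	printf("wrapped: shmem %#llx + %#llx\n",
	       (unsigned long long)wrapped.shmem_base_addr,
	       (unsigned long long)wrapped.shmem_size);

	/* Bare interface (restored): the client gets only the channel and must
	 * recover the shared-memory region from its private data itself. */
	struct fake_pcct_subspace *priv = raw.con_priv;
	printf("raw:     shmem %#llx + %#llx\n",
	       (unsigned long long)priv->base_address,
	       (unsigned long long)priv->length);
	return 0;
}
```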
|
@ -10,12 +10,25 @@
|
||||
#ifndef __ACGCC_H__
|
||||
#define __ACGCC_H__
|
||||
|
||||
/*
|
||||
* Use compiler specific <stdarg.h> is a good practice for even when
|
||||
* -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
|
||||
*/
|
||||
#ifndef va_arg
|
||||
#ifdef ACPI_USE_BUILTIN_STDARG
|
||||
typedef __builtin_va_list va_list;
|
||||
#define va_start(v, l) __builtin_va_start(v, l)
|
||||
#define va_end(v) __builtin_va_end(v)
|
||||
#define va_arg(v, l) __builtin_va_arg(v, l)
|
||||
#define va_copy(d, s) __builtin_va_copy(d, s)
|
||||
#else
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/stdarg.h>
|
||||
#else
|
||||
/* Used to build acpi tools */
|
||||
#include <stdarg.h>
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* ACPI_USE_BUILTIN_STDARG */
|
||||
#endif /* ! va_arg */
|
||||
|
||||
#define ACPI_INLINE __inline__
|
||||
|
@ -2,16 +2,11 @@
|
||||
#ifndef __ACPI_PROCESSOR_H
|
||||
#define __ACPI_PROCESSOR_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/thermal.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <asm/acpi.h>
|
||||
|
||||
#define ACPI_PROCESSOR_CLASS "processor"
|
||||
|
@ -14,38 +14,12 @@
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kcsan-checks.h>
|
||||
#include <asm/rwonce.h>
|
||||
|
||||
#ifndef nop
|
||||
#define nop() asm volatile ("nop")
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Architectures that want generic instrumentation can define __ prefixed
|
||||
* variants of all barriers.
|
||||
*/
|
||||
|
||||
#ifdef __mb
|
||||
#define mb() do { kcsan_mb(); __mb(); } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __rmb
|
||||
#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __wmb
|
||||
#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __dma_rmb
|
||||
#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef __dma_wmb
|
||||
#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Force strict CPU ordering. And yes, this is required on UP too when we're
|
||||
* talking to devices.
|
||||
@ -88,15 +62,15 @@
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#ifndef smp_mb
|
||||
#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
|
||||
#define smp_mb() __smp_mb()
|
||||
#endif
|
||||
|
||||
#ifndef smp_rmb
|
||||
#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
|
||||
#define smp_rmb() __smp_rmb()
|
||||
#endif
|
||||
|
||||
#ifndef smp_wmb
|
||||
#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
|
||||
#define smp_wmb() __smp_wmb()
|
||||
#endif
|
||||
|
||||
#else /* !CONFIG_SMP */
|
||||
@ -149,19 +123,19 @@ do { \
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#ifndef smp_store_mb
|
||||
#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
|
||||
#define smp_store_mb(var, value) __smp_store_mb(var, value)
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__before_atomic
|
||||
#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
|
||||
#define smp_mb__before_atomic() __smp_mb__before_atomic()
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__after_atomic
|
||||
#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
|
||||
#define smp_mb__after_atomic() __smp_mb__after_atomic()
|
||||
#endif
|
||||
|
||||
#ifndef smp_store_release
|
||||
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
|
||||
#define smp_store_release(p, v) __smp_store_release(p, v)
|
||||
#endif
|
||||
|
||||
#ifndef smp_load_acquire
|
||||
@ -204,13 +178,13 @@ do { \
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/* Barriers for virtual machine guests when talking to an SMP host */
|
||||
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
|
||||
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
|
||||
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
|
||||
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
|
||||
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
|
||||
#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
|
||||
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
|
||||
#define virt_mb() __smp_mb()
|
||||
#define virt_rmb() __smp_rmb()
|
||||
#define virt_wmb() __smp_wmb()
|
||||
#define virt_store_mb(var, value) __smp_store_mb(var, value)
|
||||
#define virt_mb__before_atomic() __smp_mb__before_atomic()
|
||||
#define virt_mb__after_atomic() __smp_mb__after_atomic()
|
||||
#define virt_store_release(p, v) __smp_store_release(p, v)
|
||||
#define virt_load_acquire(p) __smp_load_acquire(p)
|
||||
|
||||
/**
|
||||
@@ -277,16 +251,5 @@
#define pmem_wmb() wmb()
#endif

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory accesses, the CPU may wait for prior accesses to be
 * merged with subsequent ones. In some situation, such wait is bad for the
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
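Several hunks in this barrier.h section strip the `kcsan_mb()`/`kcsan_rmb()`/`kcsan_wmb()` calls back out of the generic `smp_*()` and `virt_*()` wrappers, reverting them to plain aliases of the arch `__smp_*()` primitives. A standalone sketch of the two macro shapes involved, with trivial stand-ins for both the instrumentation hook and the arch barrier (neither is the kernel's implementation):

```c
/* Toy model of the instrumented vs. plain barrier wrapper pattern. */
#include <stdio.h>

static int kcsan_calls;			/* counts "instrumentation" invocations */

#define fake_kcsan_mb()	(kcsan_calls++)	/* stand-in for kcsan_mb() */
#define fake_arch_mb()	__atomic_thread_fence(__ATOMIC_SEQ_CST)	/* stand-in for __smp_mb() */

/* Instrumented form (what this commit removes): tell the checker, then fence. */
#define smp_mb_instrumented()	do { fake_kcsan_mb(); fake_arch_mb(); } while (0)

/* Plain form (what this commit restores): just the arch primitive. */
#define smp_mb_plain()		fake_arch_mb()

int main(void)
{
	smp_mb_instrumented();
	smp_mb_plain();
	printf("instrumentation hook ran %d time(s)\n", kcsan_calls);	/* 1 */
	return 0;
}
```

The do { } while (0) wrapper keeps the two-statement form usable anywhere a single statement is expected, which is why the instrumented variants in the hunks take that shape.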
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <asm-generic/bitops/fls.h>
|
||||
#include <asm-generic/bitops/__fls.h>
|
||||
#include <asm-generic/bitops/fls64.h>
|
||||
#include <asm-generic/bitops/find.h>
|
||||
|
||||
#ifndef _LINUX_BITOPS_H
|
||||
#error only <linux/bitops.h> can be included directly
|
||||
|
@ -1,6 +1,14 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
|
||||
#define _ASM_GENERIC_BITOPS_FIND_H_
|
||||
|
||||
extern unsigned long _find_next_bit(const unsigned long *addr1,
|
||||
const unsigned long *addr2, unsigned long nbits,
|
||||
unsigned long start, unsigned long invert, unsigned long le);
|
||||
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
|
||||
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
|
||||
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
|
||||
|
||||
#ifndef find_next_bit
/**
 * find_next_bit - find the next set bit in a memory region
@@ -11,8 +19,52 @@
 * Returns the bit number for the next set bit
 * If no bits are set, returns @size.
 */
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
		size, unsigned long offset);
static inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	if (small_const_nbits(size)) {
		unsigned long val;

		if (unlikely(offset >= size))
			return size;

		val = *addr & GENMASK(size - 1, offset);
		return val ? __ffs(val) : size;
	}

	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}
#endif
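The inline `find_next_bit()` above takes a fast path when the bitmap is known at compile time to fit in one word: mask off everything below `offset` with `GENMASK()` and take `__ffs()` of whatever remains. A standalone re-implementation of just that word-sized path, with local `genmask()`/`ffs_long()` helpers standing in for the kernel macros:

```c
/* Word-sized fast path of find_next_bit(), re-implemented standalone. */
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Set bits h..l inclusive, like the kernel's GENMASK(h, l). */
static unsigned long genmask(unsigned int h, unsigned int l)
{
	return (~0UL >> (BITS_PER_LONG - 1 - h)) & (~0UL << l);
}

/* Index of least significant set bit; caller guarantees word != 0 (like __ffs()). */
static unsigned int ffs_long(unsigned long word)
{
	return (unsigned int)__builtin_ctzl(word);
}

/* find_next_bit() restricted to a single-word bitmap of 'size' bits. */
static unsigned long find_next_bit_word(unsigned long word, unsigned long size,
					unsigned long offset)
{
	unsigned long val;

	if (offset >= size)
		return size;

	val = word & genmask(size - 1, offset);	/* drop bits below offset */
	return val ? ffs_long(val) : size;	/* size means "no bit found" */
}

int main(void)
{
	unsigned long map = 0x90UL;	/* bits 4 and 7 set */

	printf("%lu\n", find_next_bit_word(map, 16, 0));	/* 4 */
	printf("%lu\n", find_next_bit_word(map, 16, 5));	/* 7 */
	printf("%lu\n", find_next_bit_word(map, 16, 8));	/* 16 (none) */
	return 0;
}
```

The out-of-line `_find_next_bit()` call in the hunk handles the multi-word case that this sketch deliberately omits.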
|
||||
|
||||
#ifndef find_next_and_bit
|
||||
/**
|
||||
* find_next_and_bit - find the next set bit in both memory regions
|
||||
* @addr1: The first address to base the search on
|
||||
* @addr2: The second address to base the search on
|
||||
* @offset: The bitnumber to start searching at
|
||||
* @size: The bitmap size in bits
|
||||
*
|
||||
* Returns the bit number for the next set bit
|
||||
* If no bits are set, returns @size.
|
||||
*/
|
||||
static inline
|
||||
unsigned long find_next_and_bit(const unsigned long *addr1,
|
||||
const unsigned long *addr2, unsigned long size,
|
||||
unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = *addr1 & *addr2 & GENMASK(size - 1, offset);
|
||||
return val ? __ffs(val) : size;
|
||||
}
|
||||
|
||||
return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_next_zero_bit
|
||||
@ -25,12 +77,27 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
|
||||
* Returns the bit number of the next zero bit
|
||||
* If no bits are zero, returns @size.
|
||||
*/
|
||||
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
|
||||
long size, unsigned long offset);
|
||||
static inline
|
||||
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
|
||||
unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = *addr | ~GENMASK(size - 1, offset);
|
||||
return val == ~0UL ? size : ffz(val);
|
||||
}
|
||||
|
||||
return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
|
||||
|
||||
#ifndef find_first_bit
|
||||
/**
|
||||
* find_first_bit - find the first set bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
@ -39,9 +106,20 @@ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
|
||||
* Returns the bit number of the first set bit.
|
||||
* If no bits are set, returns @size.
|
||||
*/
|
||||
extern unsigned long find_first_bit(const unsigned long *addr,
|
||||
unsigned long size);
|
||||
static inline
|
||||
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr & GENMASK(size - 1, 0);
|
||||
|
||||
return val ? __ffs(val) : size;
|
||||
}
|
||||
|
||||
return _find_first_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_first_zero_bit
|
||||
/**
|
||||
* find_first_zero_bit - find the first cleared bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
@ -50,13 +128,66 @@ extern unsigned long find_first_bit(const unsigned long *addr,
|
||||
* Returns the bit number of the first cleared bit.
|
||||
* If no bits are zero, returns @size.
|
||||
*/
|
||||
extern unsigned long find_first_zero_bit(const unsigned long *addr,
|
||||
unsigned long size);
|
||||
static inline
|
||||
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr | ~GENMASK(size - 1, 0);
|
||||
|
||||
return val == ~0UL ? size : ffz(val);
|
||||
}
|
||||
|
||||
return _find_first_zero_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
|
||||
|
||||
#ifndef find_first_bit
|
||||
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
|
||||
#endif
|
||||
#ifndef find_first_zero_bit
|
||||
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
|
||||
|
||||
#ifndef find_last_bit
|
||||
/**
|
||||
* find_last_bit - find the last set bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
* @size: The number of bits to search
|
||||
*
|
||||
* Returns the bit number of the last set bit, or size.
|
||||
*/
|
||||
static inline
|
||||
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr & GENMASK(size - 1, 0);
|
||||
|
||||
return val ? __fls(val) : size;
|
||||
}
|
||||
|
||||
return _find_last_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
|
||||
* @clump: location to store copy of found clump
|
||||
* @addr: address to base the search on
|
||||
* @size: bitmap size in number of bits
|
||||
* @offset: bit offset at which to start searching
|
||||
*
|
||||
* Returns the bit offset for the next set clump; the found clump value is
|
||||
* copied to the location pointed by @clump. If no bits are set, returns @size.
|
||||
*/
|
||||
extern unsigned long find_next_clump8(unsigned long *clump,
|
||||
const unsigned long *addr,
|
||||
unsigned long size, unsigned long offset);
|
||||
|
||||
#define find_first_clump8(clump, bits, size) \
|
||||
find_next_clump8((clump), (bits), (size), 0)
|
||||
|
||||
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
|
||||
|
@ -67,7 +67,6 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
return arch_test_and_set_bit(nr, addr);
|
||||
}
|
||||
@ -81,7 +80,6 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
return arch_test_and_clear_bit(nr, addr);
|
||||
}
|
||||
@ -95,7 +93,6 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
return arch_test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
@ -22,7 +22,6 @@
|
||||
*/
|
||||
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
arch_clear_bit_unlock(nr, addr);
|
||||
}
|
||||
@ -38,7 +37,6 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
arch___clear_bit_unlock(nr, addr);
|
||||
}
|
||||
@ -73,7 +71,6 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
|
||||
static inline bool
|
||||
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
|
||||
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
|
||||
}
|
||||
|
@ -2,19 +2,83 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_LE_H_
|
||||
#define _ASM_GENERIC_BITOPS_LE_H_
|
||||
|
||||
#include <asm-generic/bitops/find.h>
|
||||
#include <asm/types.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/swab.h>
|
||||
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
|
||||
#define BITOP_LE_SWIZZLE 0
|
||||
|
||||
static inline unsigned long find_next_zero_bit_le(const void *addr,
|
||||
unsigned long size, unsigned long offset)
|
||||
{
|
||||
return find_next_zero_bit(addr, size, offset);
|
||||
}
|
||||
|
||||
static inline unsigned long find_next_bit_le(const void *addr,
|
||||
unsigned long size, unsigned long offset)
|
||||
{
|
||||
return find_next_bit(addr, size, offset);
|
||||
}
|
||||
|
||||
static inline unsigned long find_first_zero_bit_le(const void *addr,
|
||||
unsigned long size)
|
||||
{
|
||||
return find_first_zero_bit(addr, size);
|
||||
}
|
||||
|
||||
#elif defined(__BIG_ENDIAN)
|
||||
|
||||
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
|
||||
|
||||
#ifndef find_next_zero_bit_le
|
||||
static inline
|
||||
unsigned long find_next_zero_bit_le(const void *addr, unsigned
|
||||
long size, unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *(const unsigned long *)addr;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = swab(val) | ~GENMASK(size - 1, offset);
|
||||
return val == ~0UL ? size : ffz(val);
|
||||
}
|
||||
|
||||
return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_next_bit_le
|
||||
static inline
|
||||
unsigned long find_next_bit_le(const void *addr, unsigned
|
||||
long size, unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *(const unsigned long *)addr;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = swab(val) & GENMASK(size - 1, offset);
|
||||
return val ? __ffs(val) : size;
|
||||
}
|
||||
|
||||
return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_first_zero_bit_le
|
||||
#define find_first_zero_bit_le(addr, size) \
|
||||
find_next_zero_bit_le((addr), (size), 0)
|
||||
#endif
|
||||
|
||||
#else
|
||||
#error "Please fix <asm/byteorder.h>"
|
||||
#endif
|
||||
|
||||
static inline int test_bit_le(int nr, const void *addr)
|
||||
{
|
||||
|
@ -20,7 +20,7 @@ struct pt_regs;
|
||||
|
||||
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
|
||||
/*
|
||||
* Whitelist generating macro. Specify functions which can be
|
||||
* Whitelist ganerating macro. Specify functions which can be
|
||||
* error-injectable using this macro.
|
||||
*/
|
||||
#define ALLOW_ERROR_INJECTION(fname, _etype) \
|
||||
@ -29,7 +29,7 @@ static struct error_injection_entry __used \
|
||||
_eil_addr_##fname = { \
|
||||
.addr = (unsigned long)fname, \
|
||||
.etype = EI_ETYPE_##_etype, \
|
||||
}
|
||||
};
|
||||
|
||||
void override_function_with_return(struct pt_regs *regs);
|
||||
#else
|
||||
|
@ -6,22 +6,15 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
#ifndef futex_atomic_cmpxchg_inatomic
|
||||
#ifndef CONFIG_SMP
|
||||
/*
|
||||
* The following implementation only for uniprocessor machines.
|
||||
* It relies on preempt_disable() ensuring mutual exclusion.
|
||||
*
|
||||
*/
|
||||
#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
|
||||
futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
|
||||
#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
|
||||
futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif
|
||||
|
||||
/**
|
||||
* futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
|
||||
* arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
|
||||
* argument and comparison of the previous
|
||||
* futex value with another constant.
|
||||
*
|
||||
@ -35,7 +28,7 @@
|
||||
* -ENOSYS - Operation not supported
|
||||
*/
|
||||
static inline int
|
||||
futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
|
||||
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
|
||||
{
|
||||
int oldval, ret;
|
||||
u32 tmp;
|
||||
@ -82,7 +75,7 @@ futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
|
||||
}
|
||||
|
||||
/**
|
||||
* futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
|
||||
* futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
|
||||
* uaddr with newval if the current value is
|
||||
* oldval.
|
||||
* @uval: pointer to store content of @uaddr
|
||||
@ -94,9 +87,10 @@ futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
|
||||
* 0 - On success
|
||||
* -EFAULT - User access resulted in a page fault
|
||||
* -EAGAIN - Atomic operation was unable to complete due to contention
|
||||
* -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
|
||||
*/
|
||||
static inline int
|
||||
futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
|
||||
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
u32 oldval, u32 newval)
|
||||
{
|
||||
u32 val;
|
||||
@ -118,4 +112,19 @@ futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline int
|
||||
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline int
|
||||
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
u32 oldval, u32 newval)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif
|
||||
|
@ -158,7 +158,6 @@ struct ms_hyperv_tsc_page {
|
||||
#define HVCALL_RETARGET_INTERRUPT 0x007e
|
||||
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
|
||||
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
|
||||
#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
|
||||
|
||||
/* Extended hypercalls */
|
||||
#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
|
||||
@ -540,6 +539,39 @@ enum hv_interrupt_source {
|
||||
HV_INTERRUPT_SOURCE_IOAPIC,
|
||||
};
|
||||
|
||||
union hv_msi_address_register {
|
||||
u32 as_uint32;
|
||||
struct {
|
||||
u32 reserved1:2;
|
||||
u32 destination_mode:1;
|
||||
u32 redirection_hint:1;
|
||||
u32 reserved2:8;
|
||||
u32 destination_id:8;
|
||||
u32 msi_base:12;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
union hv_msi_data_register {
|
||||
u32 as_uint32;
|
||||
struct {
|
||||
u32 vector:8;
|
||||
u32 delivery_mode:3;
|
||||
u32 reserved1:3;
|
||||
u32 level_assert:1;
|
||||
u32 trigger_mode:1;
|
||||
u32 reserved2:16;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* HvRetargetDeviceInterrupt hypercall */
|
||||
union hv_msi_entry {
|
||||
u64 as_uint64;
|
||||
struct {
|
||||
union hv_msi_address_register address;
|
||||
union hv_msi_data_register data;
|
||||
} __packed;
|
||||
};
|
||||
|
||||
union hv_ioapic_rte {
|
||||
u64 as_uint64;
|
||||
|
||||
|
@ -34,7 +34,7 @@
|
||||
void __iomem *ioremap(phys_addr_t offset, size_t size);
|
||||
|
||||
#define iounmap iounmap
|
||||
void iounmap(void volatile __iomem *addr);
|
||||
void iounmap(void __iomem *addr);
|
||||
|
||||
#define __raw_readb __raw_readb
|
||||
u8 __raw_readb(const volatile void __iomem *addr);
|
||||
|
@ -35,26 +35,15 @@ struct ms_hyperv_info {
|
||||
u32 max_vp_index;
|
||||
u32 max_lp_index;
|
||||
u32 isolation_config_a;
|
||||
union {
|
||||
u32 isolation_config_b;
|
||||
struct {
|
||||
u32 cvm_type : 4;
|
||||
u32 reserved1 : 1;
|
||||
u32 shared_gpa_boundary_active : 1;
|
||||
u32 shared_gpa_boundary_bits : 6;
|
||||
u32 reserved2 : 20;
|
||||
};
|
||||
};
|
||||
u64 shared_gpa_boundary;
|
||||
u32 isolation_config_b;
|
||||
};
|
||||
extern struct ms_hyperv_info ms_hyperv;
|
||||
|
||||
extern void * __percpu *hyperv_pcpu_input_arg;
|
||||
extern void * __percpu *hyperv_pcpu_output_arg;
|
||||
extern void __percpu **hyperv_pcpu_input_arg;
|
||||
extern void __percpu **hyperv_pcpu_output_arg;
|
||||
|
||||
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
|
||||
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
|
||||
extern bool hv_isolation_type_snp(void);
|
||||
|
||||
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
|
||||
static inline int hv_result(u64 status)
|
||||
@ -265,21 +254,12 @@ bool hv_is_hyperv_initialized(void);
|
||||
bool hv_is_hibernation_supported(void);
|
||||
enum hv_isolation_type hv_get_isolation_type(void);
|
||||
bool hv_is_isolation_supported(void);
|
||||
bool hv_isolation_type_snp(void);
|
||||
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
|
||||
void hyperv_cleanup(void);
|
||||
bool hv_query_ext_cap(u64 cap_query);
|
||||
void *hv_map_memory(void *addr, unsigned long size);
|
||||
void hv_unmap_memory(void *addr);
|
||||
#else /* CONFIG_HYPERV */
|
||||
static inline bool hv_is_hyperv_initialized(void) { return false; }
|
||||
static inline bool hv_is_hibernation_supported(void) { return false; }
|
||||
static inline void hyperv_cleanup(void) {}
|
||||
static inline bool hv_is_isolation_supported(void) { return false; }
|
||||
static inline enum hv_isolation_type hv_get_isolation_type(void)
|
||||
{
|
||||
return HV_ISOLATION_TYPE_NONE;
|
||||
}
|
||||
#endif /* CONFIG_HYPERV */
|
||||
|
||||
#endif
|
||||
|
@ -147,15 +147,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS > 3
|
||||
|
||||
static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
gfp_t gfp = GFP_PGTABLE_USER;
|
||||
|
||||
if (mm == &init_mm)
|
||||
gfp = GFP_PGTABLE_KERNEL;
|
||||
return (pud_t *)get_zeroed_page(gfp);
|
||||
}
|
||||
|
||||
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
|
||||
/**
|
||||
* pud_alloc_one - allocate a page for PUD-level page table
|
||||
@ -168,23 +159,20 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
||||
*/
|
||||
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
return __pud_alloc_one(mm, addr);
|
||||
gfp_t gfp = GFP_PGTABLE_USER;
|
||||
|
||||
if (mm == &init_mm)
|
||||
gfp = GFP_PGTABLE_KERNEL;
|
||||
return (pud_t *)get_zeroed_page(gfp);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
|
||||
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
||||
{
|
||||
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
|
||||
free_page((unsigned long)pud);
|
||||
}
|
||||
|
||||
#ifndef __HAVE_ARCH_PUD_FREE
|
||||
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
||||
{
|
||||
__pud_free(mm, pud);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
|
||||
|
||||
#ifndef __HAVE_ARCH_PGD_FREE
|
||||
|
@ -64,6 +64,36 @@ extern __visible const void __nosave_begin, __nosave_end;
|
||||
#define dereference_kernel_function_descriptor(p) ((void *)(p))
|
||||
#endif
|
||||
|
||||
/* random extra sections (if any). Override
|
||||
* in asm/sections.h */
|
||||
#ifndef arch_is_kernel_text
|
||||
static inline int arch_is_kernel_text(unsigned long addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef arch_is_kernel_data
|
||||
static inline int arch_is_kernel_data(unsigned long addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Check if an address is part of freed initmem. This is needed on architectures
|
||||
* with virt == phys kernel mapping, for code that wants to check if an address
|
||||
* is part of a static object within [_stext, _end]. After initmem is freed,
|
||||
* memory can be allocated from it, and such allocations would then have
|
||||
* addresses within the range [_stext, _end].
|
||||
*/
|
||||
#ifndef arch_is_kernel_initmem_freed
|
||||
static inline int arch_is_kernel_initmem_freed(unsigned long addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* memory_contains - checks if an object is contained within a memory region
|
||||
* @begin: virtual address of the beginning of the memory region
|
||||
@ -128,28 +158,6 @@ static inline bool init_section_intersects(void *virt, size_t size)
|
||||
return memory_intersects(__init_begin, __init_end, virt, size);
|
||||
}
|
||||
|
||||
/**
|
||||
* is_kernel_core_data - checks if the pointer address is located in the
|
||||
* .data or .bss section
|
||||
*
|
||||
* @addr: address to check
|
||||
*
|
||||
* Returns: true if the address is located in .data or .bss, false otherwise.
|
||||
* Note: On some archs it may return true for core RODATA, and false
|
||||
* for others. But will always be true for core RW data.
|
||||
*/
|
||||
static inline bool is_kernel_core_data(unsigned long addr)
|
||||
{
|
||||
if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
|
||||
return true;
|
||||
|
||||
if (addr >= (unsigned long)__bss_start &&
|
||||
addr < (unsigned long)__bss_stop)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* is_kernel_rodata - checks if the pointer address is located in the
|
||||
* .rodata section
|
||||
@ -164,51 +172,4 @@ static inline bool is_kernel_rodata(unsigned long addr)
|
||||
addr < (unsigned long)__end_rodata;
|
||||
}
|
||||
|
||||
/**
|
||||
* is_kernel_inittext - checks if the pointer address is located in the
|
||||
* .init.text section
|
||||
*
|
||||
* @addr: address to check
|
||||
*
|
||||
* Returns: true if the address is located in .init.text, false otherwise.
|
||||
*/
|
||||
static inline bool is_kernel_inittext(unsigned long addr)
|
||||
{
|
||||
return addr >= (unsigned long)_sinittext &&
|
||||
addr < (unsigned long)_einittext;
|
||||
}
|
||||
|
||||
/**
|
||||
* __is_kernel_text - checks if the pointer address is located in the
|
||||
* .text section
|
||||
*
|
||||
* @addr: address to check
|
||||
*
|
||||
* Returns: true if the address is located in .text, false otherwise.
|
||||
* Note: an internal helper, only check the range of _stext to _etext.
|
||||
*/
|
||||
static inline bool __is_kernel_text(unsigned long addr)
|
||||
{
|
||||
return addr >= (unsigned long)_stext &&
|
||||
addr < (unsigned long)_etext;
|
||||
}
|
||||
|
||||
/**
|
||||
* __is_kernel - checks if the pointer address is located in the kernel range
|
||||
*
|
||||
* @addr: address to check
|
||||
*
|
||||
* Returns: true if the address is located in the kernel range, false otherwise.
|
||||
* Note: an internal helper, check the range of _stext to _end,
|
||||
* and range from __init_begin to __init_end, which can be outside
|
||||
* of the _stext to _end range.
|
||||
*/
|
||||
static inline bool __is_kernel(unsigned long addr)
|
||||
{
|
||||
return ((addr >= (unsigned long)_stext &&
|
||||
addr < (unsigned long)_end) ||
|
||||
(addr >= (unsigned long)__init_begin &&
|
||||
addr < (unsigned long)__init_end));
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_SECTIONS_H_ */
|
||||
|
@ -117,6 +117,22 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
|
||||
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned long *args);
|
||||
|
||||
/**
|
||||
* syscall_set_arguments - change system call parameter value
|
||||
* @task: task of interest, must be in system call entry tracing
|
||||
* @regs: task_pt_regs() of @task
|
||||
* @args: array of argument values to store
|
||||
*
|
||||
* Changes 6 arguments to the system call.
|
||||
* The first argument gets value @args[0], and so on.
|
||||
*
|
||||
* It's only valid to call this when @task is stopped for tracing on
|
||||
* entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
|
||||
* %SYSCALL_WORK_SYSCALL_AUDIT.
|
||||
*/
|
||||
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
const unsigned long *args);
|
||||
|
||||
/**
|
||||
* syscall_get_arch - return the AUDIT_ARCH for the current system call
|
||||
* @task: task of interest, must be blocked
|
||||
|
@ -164,22 +164,16 @@
|
||||
* Need to also make ftrace_stub_graph point to ftrace_stub
|
||||
* so that the same stub location may have different protocols
|
||||
* and not mess up with C verifiers.
|
||||
*
|
||||
* ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
|
||||
* as some archs will have a different prototype for that function
|
||||
* but ftrace_ops_list_func() will have a single prototype.
|
||||
*/
|
||||
#define MCOUNT_REC() . = ALIGN(8); \
|
||||
__start_mcount_loc = .; \
|
||||
KEEP(*(__mcount_loc)) \
|
||||
KEEP(*(__patchable_function_entries)) \
|
||||
__stop_mcount_loc = .; \
|
||||
ftrace_stub_graph = ftrace_stub; \
|
||||
ftrace_ops_list_func = arch_ftrace_ops_list_func;
|
||||
ftrace_stub_graph = ftrace_stub;
|
||||
#else
|
||||
# ifdef CONFIG_FUNCTION_TRACER
|
||||
# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
|
||||
ftrace_ops_list_func = arch_ftrace_ops_list_func;
|
||||
# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
|
||||
# else
|
||||
# define MCOUNT_REC()
|
||||
# endif
|
||||
@ -476,7 +470,13 @@
|
||||
__end_pci_fixups_suspend_late = .; \
|
||||
} \
|
||||
\
|
||||
FW_LOADER_BUILT_IN_DATA \
|
||||
/* Built-in firmware blobs */ \
|
||||
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
|
||||
__start_builtin_fw = .; \
|
||||
KEEP(*(.builtin_fw)) \
|
||||
__end_builtin_fw = .; \
|
||||
} \
|
||||
\
|
||||
TRACEDATA \
|
||||
\
|
||||
PRINTK_INDEX \
|
||||
@ -869,11 +869,10 @@
|
||||
KEEP(*(.orc_unwind)) \
|
||||
__stop_orc_unwind = .; \
|
||||
} \
|
||||
text_size = _etext - _stext; \
|
||||
. = ALIGN(4); \
|
||||
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
|
||||
orc_lookup = .; \
|
||||
. += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
|
||||
. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
|
||||
LOOKUP_BLOCK_SIZE) + 1) * 4; \
|
||||
orc_lookup_end = .; \
|
||||
}
|
||||
@ -881,18 +880,6 @@
|
||||
#define ORC_UNWIND_TABLE
|
||||
#endif
|
||||
|
||||
/* Built-in firmware blobs */
|
||||
#ifdef CONFIG_FW_LOADER
|
||||
#define FW_LOADER_BUILT_IN_DATA \
|
||||
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
|
||||
__start_builtin_fw = .; \
|
||||
KEEP(*(.builtin_fw)) \
|
||||
__end_builtin_fw = .; \
|
||||
}
|
||||
#else
|
||||
#define FW_LOADER_BUILT_IN_DATA
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_TRACE
|
||||
#define TRACEDATA \
|
||||
. = ALIGN(4); \
|
||||
|
@ -8,10 +8,9 @@
|
||||
#ifndef _CRYPTO_AEAD_H
|
||||
#define _CRYPTO_AEAD_H
|
||||
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/**
|
||||
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
|
||||
@ -74,7 +73,6 @@
|
||||
*/
|
||||
|
||||
struct crypto_aead;
|
||||
struct scatterlist;
|
||||
|
||||
/**
|
||||
* struct aead_request - AEAD request
|
||||
|
@ -7,11 +7,9 @@
|
||||
#ifndef _CRYPTO_ALGAPI_H
|
||||
#define _CRYPTO_ALGAPI_H
|
||||
|
||||
#include <linux/align.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
/*
|
||||
* Maximum values for blocksize and alignmask, used to allocate
|
||||
@ -26,7 +24,6 @@
|
||||
struct crypto_aead;
|
||||
struct crypto_instance;
|
||||
struct module;
|
||||
struct notifier_block;
|
||||
struct rtattr;
|
||||
struct seq_file;
|
||||
struct sk_buff;
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
enum blake2b_lengths {
|
||||
|
@ -7,8 +7,8 @@
|
||||
#define _CRYPTO_BLAKE2S_H
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
enum blake2s_lengths {
|
||||
@ -101,4 +101,7 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
|
||||
blake2s_final(&state, out);
|
||||
}
|
||||
|
||||
void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
|
||||
const size_t keylen);
|
||||
|
||||
#endif /* _CRYPTO_BLAKE2S_H */
|
||||
|
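As context for the blake2s.h hunk above: the one-shot blake2s() helper wraps blake2s_init()/blake2s_update()/blake2s_final(). A minimal, illustrative sketch of hashing a buffer with it, assuming the usual (out, in, key, outlen, inlen, keylen) argument order of this header (the function name below is made up):

#include <crypto/blake2s.h>

/* Hash a message into a 32-byte digest with the one-shot helper;
 * an unkeyed hash passes a NULL key and keylen == 0. */
static void example_blake2s_digest(const u8 *msg, size_t len)
{
        u8 digest[BLAKE2S_HASH_SIZE];

        blake2s(digest, msg, NULL, sizeof(digest), len, 0);
}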
@ -47,19 +47,12 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
|
||||
hchacha_block_generic(state, out, nrounds);
|
||||
}
|
||||
|
||||
enum chacha_constants { /* expand 32-byte k */
|
||||
CHACHA_CONSTANT_EXPA = 0x61707865U,
|
||||
CHACHA_CONSTANT_ND_3 = 0x3320646eU,
|
||||
CHACHA_CONSTANT_2_BY = 0x79622d32U,
|
||||
CHACHA_CONSTANT_TE_K = 0x6b206574U
|
||||
};
|
||||
|
||||
static inline void chacha_init_consts(u32 *state)
|
||||
{
|
||||
state[0] = CHACHA_CONSTANT_EXPA;
|
||||
state[1] = CHACHA_CONSTANT_ND_3;
|
||||
state[2] = CHACHA_CONSTANT_2_BY;
|
||||
state[3] = CHACHA_CONSTANT_TE_K;
|
||||
state[0] = 0x61707865; /* "expa" */
|
||||
state[1] = 0x3320646e; /* "nd 3" */
|
||||
state[2] = 0x79622d32; /* "2-by" */
|
||||
state[3] = 0x6b206574; /* "te k" */
|
||||
}
|
||||
|
||||
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
|
||||
|
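The enum constants and the open-coded hex words in the chacha hunk above encode the same data: the 16-byte ChaCha constant "expand 32-byte k" read as four little-endian 32-bit words. A standalone, illustrative check (plain userspace C, not kernel code):

#include <assert.h>
#include <stdint.h>

static uint32_t le32(const unsigned char *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        const unsigned char sigma[] = "expand 32-byte k";

        assert(le32(sigma + 0)  == 0x61707865); /* "expa" */
        assert(le32(sigma + 4)  == 0x3320646e); /* "nd 3" */
        assert(le32(sigma + 8)  == 0x79622d32); /* "2-by" */
        assert(le32(sigma + 12) == 0x6b206574); /* "te k" */
        return 0;
}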
@ -105,12 +105,6 @@ struct drbg_test_data {
|
||||
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
|
||||
};
|
||||
|
||||
enum drbg_seed_state {
|
||||
DRBG_SEED_STATE_UNSEEDED,
|
||||
DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
|
||||
DRBG_SEED_STATE_FULL,
|
||||
};
|
||||
|
||||
struct drbg_state {
|
||||
struct mutex drbg_mutex; /* lock around DRBG */
|
||||
unsigned char *V; /* internal state 10.1.1.1 1a) */
|
||||
@ -133,15 +127,16 @@ struct drbg_state {
|
||||
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
|
||||
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
|
||||
|
||||
enum drbg_seed_state seeded; /* DRBG fully seeded? */
|
||||
unsigned long last_seed_time;
|
||||
bool seeded; /* DRBG fully seeded? */
|
||||
bool pr; /* Prediction resistance enabled? */
|
||||
bool fips_primed; /* Continuous test primed? */
|
||||
unsigned char *prev; /* FIPS 140-2 continuous test value */
|
||||
struct work_struct seed_work; /* asynchronous seeding support */
|
||||
struct crypto_rng *jent;
|
||||
const struct drbg_state_ops *d_ops;
|
||||
const struct drbg_core *core;
|
||||
struct drbg_string test_data;
|
||||
struct random_ready_callback random_ready;
|
||||
};
|
||||
|
||||
static inline __u8 drbg_statelen(struct drbg_state *drbg)
|
||||
|
@ -9,18 +9,13 @@
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/aead.h>
|
||||
#include <crypto/akcipher.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <crypto/kpp.h>
|
||||
|
||||
struct device;
|
||||
|
||||
#define ENGINE_NAME_LEN 30
|
||||
/*
|
||||
@ -101,8 +96,6 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct akcipher_request *req);
|
||||
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
|
||||
struct ahash_request *req);
|
||||
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
|
||||
struct kpp_request *req);
|
||||
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct skcipher_request *req);
|
||||
void crypto_finalize_aead_request(struct crypto_engine *engine,
|
||||
@ -111,8 +104,6 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
|
||||
struct akcipher_request *req, int err);
|
||||
void crypto_finalize_hash_request(struct crypto_engine *engine,
|
||||
struct ahash_request *req, int err);
|
||||
void crypto_finalize_kpp_request(struct crypto_engine *engine,
|
||||
struct kpp_request *req, int err);
|
||||
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
|
||||
struct skcipher_request *req, int err);
|
||||
int crypto_engine_start(struct crypto_engine *engine);
|
||||
|
@ -11,11 +11,11 @@
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
|
||||
void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
|
||||
void blake2s_compress(struct blake2s_state *state, const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
|
||||
size_t nblocks, const u32 inc);
|
||||
|
||||
bool blake2s_selftest(void);
|
||||
|
||||
@ -24,11 +24,14 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
|
||||
state->f[0] = -1;
|
||||
}
|
||||
|
||||
typedef void (*blake2s_compress_t)(struct blake2s_state *state,
|
||||
const u8 *block, size_t nblocks, u32 inc);
|
||||
|
||||
/* Helper functions for BLAKE2s shared by the library and shash APIs */
|
||||
|
||||
static __always_inline void
|
||||
__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
|
||||
bool force_generic)
|
||||
static inline void __blake2s_update(struct blake2s_state *state,
|
||||
const u8 *in, size_t inlen,
|
||||
blake2s_compress_t compress)
|
||||
{
|
||||
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
|
||||
|
||||
@ -36,12 +39,7 @@ __blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
|
||||
return;
|
||||
if (inlen > fill) {
|
||||
memcpy(state->buf + state->buflen, in, fill);
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, state->buf, 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
else
|
||||
blake2s_compress(state, state->buf, 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
(*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
|
||||
state->buflen = 0;
|
||||
in += fill;
|
||||
inlen -= fill;
|
||||
@ -49,12 +47,7 @@ __blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
|
||||
if (inlen > BLAKE2S_BLOCK_SIZE) {
|
||||
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
|
||||
/* Hash one less (full) block than strictly possible */
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, in, nblocks - 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
else
|
||||
blake2s_compress(state, in, nblocks - 1,
|
||||
BLAKE2S_BLOCK_SIZE);
|
||||
(*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
|
||||
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
|
||||
}
|
||||
@ -62,16 +55,13 @@ __blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
|
||||
state->buflen += inlen;
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
|
||||
static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
|
||||
blake2s_compress_t compress)
|
||||
{
|
||||
blake2s_set_lastblock(state);
|
||||
memset(state->buf + state->buflen, 0,
|
||||
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
|
||||
if (force_generic)
|
||||
blake2s_compress_generic(state, state->buf, 1, state->buflen);
|
||||
else
|
||||
blake2s_compress(state, state->buf, 1, state->buflen);
|
||||
(*compress)(state, state->buf, 1, state->buflen);
|
||||
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
|
||||
memcpy(out, state->h, state->outlen);
|
||||
}
|
||||
@ -109,20 +99,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)

static inline int crypto_blake2s_update(struct shash_desc *desc,
const u8 *in, unsigned int inlen,
bool force_generic)
blake2s_compress_t compress)
{
struct blake2s_state *state = shash_desc_ctx(desc);

__blake2s_update(state, in, inlen, force_generic);
__blake2s_update(state, in, inlen, compress);
return 0;
}

static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
bool force_generic)
blake2s_compress_t compress)
{
struct blake2s_state *state = shash_desc_ctx(desc);

__blake2s_final(state, out, force_generic);
__blake2s_final(state, out, compress);
return 0;
}

|
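With the blake2s_compress_t-based helpers shown in the hunk above, the shash wrappers take the compression routine as an explicit argument. A hedged sketch of generic glue code built on that API (the *_generic wrapper names here are illustrative):

static int crypto_blake2s_update_generic(struct shash_desc *desc,
                                         const u8 *in, unsigned int inlen)
{
        /* Always use the portable C compression function. */
        return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
}

static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
        return crypto_blake2s_final(desc, out, blake2s_compress_generic);
}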
@ -9,8 +9,8 @@
|
||||
#ifndef _CRYPTO_PCRYPT_H
|
||||
#define _CRYPTO_PCRYPT_H
|
||||
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/padata.h>
|
||||
|
||||
struct pcrypt_request {
|
||||
|
@ -12,9 +12,8 @@
|
||||
#define _CRYPTO_SCATTERWALK_H
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
|
||||
|
@ -8,13 +8,9 @@
|
||||
#ifndef _CRYPTO_SKCIPHER_H
|
||||
#define _CRYPTO_SKCIPHER_H
|
||||
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct scatterlist;
|
||||
|
||||
/**
|
||||
* struct skcipher_request - Symmetric key cipher request
|
||||
|
@ -62,7 +62,6 @@ enum amd_asic_type {
|
||||
CHIP_DIMGREY_CAVEFISH, /* 33 */
|
||||
CHIP_BEIGE_GOBY, /* 34 */
|
||||
CHIP_YELLOW_CARP, /* 35 */
|
||||
CHIP_IP_DISCOVERY, /* 36 */
|
||||
CHIP_LAST,
|
||||
};
|
||||
|
||||
|
@ -718,14 +718,6 @@ enum drm_bridge_ops {
|
||||
* this flag shall implement the &drm_bridge_funcs->get_modes callback.
|
||||
*/
|
||||
DRM_BRIDGE_OP_MODES = BIT(3),
|
||||
/**
|
||||
* @DRM_BRIDGE_OP_UPSTREAM_FIRST: The bridge can requires
|
||||
* that the upstream node pre_enable is called before its pre_enable,
|
||||
* and conversely for post_disables. This is most frequently a
|
||||
* requirement for DSI devices which need the host to be initialised
|
||||
* before them.
|
||||
*/
|
||||
DRM_BRIDGE_OP_UPSTREAM_FIRST = BIT(4),
|
||||
};
|
||||
|
||||
/**
|
||||
@ -769,6 +761,14 @@ struct drm_bridge {
|
||||
* modes.
|
||||
*/
|
||||
bool interlace_allowed;
|
||||
/**
|
||||
* @pre_enable_upstream_first: The bridge requires that the upstream
|
||||
* bridge @pre_enable function is called before its @pre_enable,
|
||||
* and conversely for post_disable. This is most frequently a
|
||||
* requirement for DSI devices which need the host to be initialised
|
||||
* before the peripheral.
|
||||
*/
|
||||
bool pre_enable_upstream_first;
|
||||
/**
|
||||
* @ddc: Associated I2C adapter for DDC access, if any.
|
||||
*/
|
||||
@ -798,19 +798,11 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
|
||||
|
||||
void drm_bridge_add(struct drm_bridge *bridge);
|
||||
void drm_bridge_remove(struct drm_bridge *bridge);
|
||||
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
|
||||
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
|
||||
struct drm_bridge *previous,
|
||||
enum drm_bridge_attach_flags flags);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
|
||||
#else
|
||||
static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
|
||||
* @bridge: bridge object
|
||||
@ -930,17 +922,4 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
|
||||
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
|
||||
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
|
||||
u32 port, u32 endpoint);
|
||||
#else
|
||||
static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
|
||||
struct device_node *node,
|
||||
u32 port,
|
||||
u32 endpoint)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include <linux/llist.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/hdmi.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <drm/drm_mode_object.h>
|
||||
#include <drm/drm_util.h>
|
||||
|
||||
@ -41,7 +40,6 @@ struct drm_encoder;
|
||||
struct drm_property;
|
||||
struct drm_property_blob;
|
||||
struct drm_printer;
|
||||
struct drm_privacy_screen;
|
||||
struct edid;
|
||||
struct i2c_adapter;
|
||||
|
||||
@ -322,30 +320,6 @@ struct drm_monitor_range_info {
|
||||
u8 max_vfreq;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum drm_privacy_screen_status - privacy screen status
|
||||
*
|
||||
* This enum is used to track and control the state of the integrated privacy
|
||||
* screen present on some display panels, via the "privacy-screen sw-state"
|
||||
* and "privacy-screen hw-state" properties. Note the _LOCKED enum values
|
||||
* are only valid for the "privacy-screen hw-state" property.
|
||||
*
|
||||
* @PRIVACY_SCREEN_DISABLED:
|
||||
* The privacy-screen on the panel is disabled
|
||||
* @PRIVACY_SCREEN_ENABLED:
|
||||
* The privacy-screen on the panel is enabled
|
||||
* @PRIVACY_SCREEN_DISABLED_LOCKED:
|
||||
* The privacy-screen on the panel is disabled and locked (cannot be changed)
|
||||
* @PRIVACY_SCREEN_ENABLED_LOCKED:
|
||||
* The privacy-screen on the panel is enabled and locked (cannot be changed)
|
||||
*/
|
||||
enum drm_privacy_screen_status {
|
||||
PRIVACY_SCREEN_DISABLED = 0,
|
||||
PRIVACY_SCREEN_ENABLED,
|
||||
PRIVACY_SCREEN_DISABLED_LOCKED,
|
||||
PRIVACY_SCREEN_ENABLED_LOCKED,
|
||||
};
|
||||
|
||||
/*
|
||||
* This is a consolidated colorimetry list supported by HDMI and
|
||||
* DP protocol standard. The respective connectors will register
|
||||
@ -616,18 +590,6 @@ struct drm_display_info {
|
||||
* @monitor_range: Frequency range supported by monitor range descriptor
|
||||
*/
|
||||
struct drm_monitor_range_info monitor_range;
|
||||
|
||||
/**
|
||||
* @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
|
||||
* the DisplayID VESA vendor block. 0 for conventional Single-Stream
|
||||
* Transport (SST), or 2 or 4 MSO streams.
|
||||
*/
|
||||
u8 mso_stream_count;
|
||||
|
||||
/**
|
||||
* @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
|
||||
*/
|
||||
u8 mso_pixel_overlap;
|
||||
};
|
||||
|
||||
int drm_display_info_set_bus_formats(struct drm_display_info *info,
|
||||
@ -819,12 +781,6 @@ struct drm_connector_state {
|
||||
*/
|
||||
u8 max_bpc;
|
||||
|
||||
/**
|
||||
* @privacy_screen_sw_state: See :ref:`Standard Connector
|
||||
* Properties<standard_connector_properties>`
|
||||
*/
|
||||
enum drm_privacy_screen_status privacy_screen_sw_state;
|
||||
|
||||
/**
|
||||
* @hdr_output_metadata:
|
||||
* DRM blob property for HDR output metadata
|
||||
@ -1128,14 +1084,6 @@ struct drm_connector_funcs {
|
||||
*/
|
||||
void (*atomic_print_state)(struct drm_printer *p,
|
||||
const struct drm_connector_state *state);
|
||||
|
||||
/**
|
||||
* @oob_hotplug_event:
|
||||
*
|
||||
* This will get called when a hotplug-event for a drm-connector
|
||||
* has been received from a source outside the display driver / device.
|
||||
*/
|
||||
void (*oob_hotplug_event)(struct drm_connector *connector);
|
||||
};
|
||||
|
||||
/**
|
||||
@ -1280,14 +1228,6 @@ struct drm_connector {
|
||||
struct device *kdev;
|
||||
/** @attr: sysfs attributes */
|
||||
struct device_attribute *attr;
|
||||
/**
|
||||
* @fwnode: associated fwnode supplied by platform firmware
|
||||
*
|
||||
* Drivers can set this to associate a fwnode with a connector, drivers
|
||||
* are expected to get a reference on the fwnode when setting this.
|
||||
* drm_connector_cleanup() will call fwnode_handle_put() on this.
|
||||
*/
|
||||
struct fwnode_handle *fwnode;
|
||||
|
||||
/**
|
||||
* @head:
|
||||
@ -1299,14 +1239,6 @@ struct drm_connector {
|
||||
*/
|
||||
struct list_head head;
|
||||
|
||||
/**
|
||||
* @global_connector_list_entry:
|
||||
*
|
||||
* Connector entry in the global connector-list, used by
|
||||
* drm_connector_find_by_fwnode().
|
||||
*/
|
||||
struct list_head global_connector_list_entry;
|
||||
|
||||
/** @base: base KMS object */
|
||||
struct drm_mode_object base;
|
||||
|
||||
@ -1453,24 +1385,6 @@ struct drm_connector {
|
||||
*/
|
||||
struct drm_property *max_bpc_property;
|
||||
|
||||
/** @privacy_screen: drm_privacy_screen for this connector, or NULL. */
|
||||
struct drm_privacy_screen *privacy_screen;
|
||||
|
||||
/** @privacy_screen_notifier: privacy-screen notifier_block */
|
||||
struct notifier_block privacy_screen_notifier;
|
||||
|
||||
/**
|
||||
* @privacy_screen_sw_state_property: Optional atomic property for the
|
||||
* connector to control the integrated privacy screen.
|
||||
*/
|
||||
struct drm_property *privacy_screen_sw_state_property;
|
||||
|
||||
/**
|
||||
* @privacy_screen_hw_state_property: Optional atomic property for the
|
||||
* connector to report the actual integrated privacy screen state.
|
||||
*/
|
||||
struct drm_property *privacy_screen_hw_state_property;
|
||||
|
||||
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
|
||||
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
|
||||
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
|
||||
@ -1736,7 +1650,6 @@ drm_connector_is_unregistered(struct drm_connector *connector)
|
||||
DRM_CONNECTOR_UNREGISTERED;
|
||||
}
|
||||
|
||||
void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
|
||||
const char *drm_get_connector_type_name(unsigned int connector_type);
|
||||
const char *drm_get_connector_status_name(enum drm_connector_status status);
|
||||
const char *drm_get_subpixel_order_name(enum subpixel_order order);
|
||||
@ -1794,11 +1707,6 @@ int drm_connector_set_panel_orientation_with_quirk(
|
||||
int width, int height);
|
||||
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
|
||||
int min, int max);
|
||||
void drm_connector_create_privacy_screen_properties(struct drm_connector *conn);
|
||||
void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn);
|
||||
void drm_connector_attach_privacy_screen_provider(
|
||||
struct drm_connector *connector, struct drm_privacy_screen *priv);
|
||||
void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state);
|
||||
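A hedged sketch of how a connector driver might use the privacy-screen helpers listed above; drm_privacy_screen_get() is assumed from the consumer API and is not part of this hunk, and the function name below is illustrative:

static void example_attach_privacy_screen(struct drm_connector *connector,
                                          struct device *dev)
{
        struct drm_privacy_screen *priv;

        /* Assumption: drm_privacy_screen_get() looks up the panel's provider. */
        priv = drm_privacy_screen_get(dev, NULL);
        if (!IS_ERR(priv))
                drm_connector_attach_privacy_screen_provider(connector, priv);
}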
|
||||
/**
|
||||
* struct drm_tile_group - Tile group metadata
|
||||
|
@ -6,13 +6,16 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/idr.h>
|
||||
|
||||
#include <drm/drm_legacy.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
#include <drm/drm_mode_config.h>
|
||||
|
||||
struct drm_driver;
|
||||
struct drm_minor;
|
||||
struct drm_master;
|
||||
struct drm_device_dma;
|
||||
struct drm_vblank_crtc;
|
||||
struct drm_sg_mem;
|
||||
struct drm_local_map;
|
||||
struct drm_vma_offset_manager;
|
||||
struct drm_vram_mm;
|
||||
struct drm_fb_helper;
|
||||
|
@ -23,71 +23,38 @@
|
||||
#define DRM_DISPLAYID_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bits.h>
|
||||
|
||||
struct edid;
|
||||
|
||||
#define VESA_IEEE_OUI 0x3a0292
|
||||
#define DATA_BLOCK_PRODUCT_ID 0x00
|
||||
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
|
||||
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
|
||||
#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
|
||||
#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
|
||||
#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
|
||||
#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
|
||||
#define DATA_BLOCK_VESA_TIMING 0x07
|
||||
#define DATA_BLOCK_CEA_TIMING 0x08
|
||||
#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
|
||||
#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
|
||||
#define DATA_BLOCK_GP_ASCII_STRING 0x0b
|
||||
#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
|
||||
#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
|
||||
#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
|
||||
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
|
||||
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
|
||||
#define DATA_BLOCK_TILED_DISPLAY 0x12
|
||||
#define DATA_BLOCK_CTA 0x81
|
||||
|
||||
/* DisplayID Structure versions */
|
||||
#define DISPLAY_ID_STRUCTURE_VER_12 0x12
|
||||
#define DISPLAY_ID_STRUCTURE_VER_20 0x20
|
||||
#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
|
||||
|
||||
/* DisplayID Structure v1r2 Data Blocks */
|
||||
#define DATA_BLOCK_PRODUCT_ID 0x00
|
||||
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
|
||||
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
|
||||
#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
|
||||
#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
|
||||
#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
|
||||
#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
|
||||
#define DATA_BLOCK_VESA_TIMING 0x07
|
||||
#define DATA_BLOCK_CEA_TIMING 0x08
|
||||
#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
|
||||
#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
|
||||
#define DATA_BLOCK_GP_ASCII_STRING 0x0b
|
||||
#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
|
||||
#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
|
||||
#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
|
||||
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
|
||||
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
|
||||
#define DATA_BLOCK_TILED_DISPLAY 0x12
|
||||
#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
|
||||
#define DATA_BLOCK_CTA 0x81
|
||||
|
||||
/* DisplayID Structure v2r0 Data Blocks */
|
||||
#define DATA_BLOCK_2_PRODUCT_ID 0x20
|
||||
#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21
|
||||
#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22
|
||||
#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23
|
||||
#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24
|
||||
#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25
|
||||
#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26
|
||||
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
|
||||
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
|
||||
#define DATA_BLOCK_2_CONTAINER_ID 0x29
|
||||
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
|
||||
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
|
||||
|
||||
/* DisplayID Structure v1r2 Product Type */
|
||||
#define PRODUCT_TYPE_EXTENSION 0
|
||||
#define PRODUCT_TYPE_TEST 1
|
||||
#define PRODUCT_TYPE_PANEL 2
|
||||
#define PRODUCT_TYPE_MONITOR 3
|
||||
#define PRODUCT_TYPE_TV 4
|
||||
#define PRODUCT_TYPE_REPEATER 5
|
||||
#define PRODUCT_TYPE_DIRECT_DRIVE 6
|
||||
|
||||
/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */
|
||||
#define PRIMARY_USE_EXTENSION 0
|
||||
#define PRIMARY_USE_TEST 1
|
||||
#define PRIMARY_USE_GENERIC 2
|
||||
#define PRIMARY_USE_TV 3
|
||||
#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4
|
||||
#define PRIMARY_USE_DESKTOP_GAMING 5
|
||||
#define PRIMARY_USE_PRESENTATION 6
|
||||
#define PRIMARY_USE_HEAD_MOUNTED_VR 7
|
||||
#define PRIMARY_USE_HEAD_MOUNTED_AR 8
|
||||
#define PRODUCT_TYPE_EXTENSION 0
|
||||
#define PRODUCT_TYPE_TEST 1
|
||||
#define PRODUCT_TYPE_PANEL 2
|
||||
#define PRODUCT_TYPE_MONITOR 3
|
||||
#define PRODUCT_TYPE_TV 4
|
||||
#define PRODUCT_TYPE_REPEATER 5
|
||||
#define PRODUCT_TYPE_DIRECT_DRIVE 6
|
||||
|
||||
struct displayid_header {
|
||||
u8 rev;
|
||||
@ -129,16 +96,6 @@ struct displayid_detailed_timing_block {
|
||||
struct displayid_detailed_timings_1 timings[];
|
||||
};
|
||||
|
||||
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
|
||||
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
|
||||
|
||||
struct displayid_vesa_vendor_specific_block {
|
||||
struct displayid_block base;
|
||||
u8 oui[3];
|
||||
u8 data_structure_type;
|
||||
u8 mso;
|
||||
} __packed;
|
||||
|
||||
/* DisplayID iteration */
|
||||
struct displayid_iter {
|
||||
const struct edid *edid;
|
||||
|
@ -453,7 +453,6 @@ struct drm_panel;
|
||||
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
|
||||
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
|
||||
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
|
||||
#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
|
||||
|
||||
/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
|
||||
#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
|
||||
@ -538,9 +537,6 @@ struct drm_panel;
|
||||
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
|
||||
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
|
||||
|
||||
/* DFP Capability Extension */
|
||||
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
|
||||
|
||||
/* Link Configuration */
|
||||
#define DP_LINK_BW_SET 0x100
|
||||
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
|
||||
@ -692,7 +688,6 @@ struct drm_panel;
|
||||
|
||||
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
|
||||
# define DP_DECOMPRESSION_EN (1 << 0)
|
||||
#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
|
||||
|
||||
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
|
||||
# define DP_PSR_ENABLE BIT(0)
|
||||
@ -748,7 +743,6 @@ struct drm_panel;
|
||||
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
|
||||
# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
|
||||
# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
|
||||
# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
|
||||
|
||||
#define DP_ADJUST_REQUEST_LANE0_1 0x206
|
||||
#define DP_ADJUST_REQUEST_LANE2_3 0x207
|
||||
@ -871,8 +865,6 @@ struct drm_panel;
|
||||
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
|
||||
# define DP_PHY_TEST_PATTERN_CP2520 0x5
|
||||
|
||||
#define DP_PHY_SQUARE_PATTERN 0x249
|
||||
|
||||
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
|
||||
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
|
||||
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
|
||||
@ -1114,27 +1106,8 @@ struct drm_panel;
|
||||
# define DP_UHBR20 (1 << 1)
|
||||
# define DP_UHBR13_5 (1 << 2)
|
||||
|
||||
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06
|
||||
|
||||
#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
|
||||
#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
|
||||
|
||||
/* DSC Extended Capability Branch Total DSC Resources */
|
||||
#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
|
||||
# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
|
||||
# define DP_DSC_DECODER_COUNT_SHIFT 5
|
||||
#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
|
||||
# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
|
||||
# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
|
||||
# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
|
||||
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
|
||||
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
|
||||
|
||||
/* Protocol Converter Extension */
|
||||
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
|
||||
@ -1346,10 +1319,6 @@ struct drm_panel;
|
||||
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
|
||||
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
|
||||
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
|
||||
#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
|
||||
# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
|
||||
/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
|
||||
#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
|
||||
|
||||
enum drm_dp_phy {
|
||||
DP_PHY_DPRX,
|
||||
@ -1396,11 +1365,6 @@ enum drm_dp_phy {
|
||||
# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0)
|
||||
# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1)
|
||||
|
||||
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */
|
||||
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
|
||||
DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
|
||||
/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */
|
||||
|
||||
#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
|
||||
#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
|
||||
DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)
|
||||
@ -1526,8 +1490,6 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
|
||||
int lane);
|
||||
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
|
||||
int lane);
|
||||
u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
|
||||
int lane);
|
||||
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
|
||||
unsigned int lane);
|
||||
|
||||
@ -1539,11 +1501,6 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
|
||||
#define DP_LTTPR_COMMON_CAP_SIZE 8
|
||||
#define DP_LTTPR_PHY_CAP_SIZE 3
|
||||
|
||||
int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
|
||||
enum drm_dp_phy dp_phy, bool uhbr);
|
||||
int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
|
||||
enum drm_dp_phy dp_phy, bool uhbr);
|
||||
|
||||
void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
|
||||
const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
|
||||
void drm_dp_lttpr_link_train_clock_recovery_delay(void);
|
||||
@ -1783,13 +1740,6 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
{
|
||||
return dpcd[DP_DPCD_REV] >= 0x11 ||
|
||||
dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
{
|
||||
@ -1875,7 +1825,7 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
*
|
||||
* Note that currently this function will return %false for panels which support various DPCD
|
||||
* backlight features but which require the brightness be set through PWM, and don't support setting
|
||||
* the brightness level via the DPCD.
|
||||
* the brightness level via the DPCD. This is a TODO.
|
||||
*
|
||||
* Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false
|
||||
* otherwise
|
||||
@ -1883,7 +1833,8 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
static inline bool
|
||||
drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
|
||||
{
|
||||
return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
|
||||
return (edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
|
||||
(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2244,7 +2195,6 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
|
||||
* @max: The maximum backlight level that may be set
|
||||
* @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
|
||||
* @aux_enable: Does the panel support the AUX enable cap?
|
||||
* @aux_set: Does the panel support setting the brightness through AUX?
|
||||
*
|
||||
* This structure contains various data about an eDP backlight, which can be populated by using
|
||||
* drm_edp_backlight_init().
|
||||
@ -2256,7 +2206,6 @@ struct drm_edp_backlight_info {
|
||||
|
||||
bool lsb_reg_used : 1;
|
||||
bool aux_enable : 1;
|
||||
bool aux_set : 1;
|
||||
};
|
||||
|
||||
int
|
||||
|
@ -554,8 +554,6 @@ struct drm_dp_mst_topology_state {
|
||||
struct drm_private_state base;
|
||||
struct list_head vcpis;
|
||||
struct drm_dp_mst_topology_mgr *mgr;
|
||||
u8 total_avail_slots;
|
||||
u8 start_slot;
|
||||
};
|
||||
|
||||
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
|
||||
@ -808,7 +806,6 @@ int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
|
||||
|
||||
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
|
||||
|
||||
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
|
||||
|
||||
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port);
|
||||
@ -818,7 +815,7 @@ int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
|
||||
int pbn);
|
||||
|
||||
|
||||
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
|
||||
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
|
||||
|
||||
|
||||
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
|
||||
|
@ -291,9 +291,8 @@ struct drm_driver {
|
||||
/**
|
||||
* @gem_create_object: constructor for gem objects
|
||||
*
|
||||
* Hook for allocating the GEM object struct, for use by the CMA
|
||||
* and SHMEM GEM helpers. Returns a GEM object on success, or an
|
||||
* ERR_PTR()-encoded error code otherwise.
|
||||
* Hook for allocating the GEM object struct, for use by the CMA and
|
||||
* SHMEM GEM helpers.
|
||||
*/
|
||||
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
|
||||
size_t size);
|
||||
@ -346,14 +345,11 @@ struct drm_driver {
|
||||
* mmap hook for GEM drivers, used to implement dma-buf mmap in the
|
||||
* PRIME helpers.
|
||||
*
|
||||
* This hook only exists for historical reasons. Drivers must use
|
||||
* drm_gem_prime_mmap() to implement it.
|
||||
*
|
||||
* FIXME: Convert all drivers to implement mmap in struct
|
||||
* &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
|
||||
* its callers. This hook should be removed afterwards.
|
||||
* FIXME: There's way too much duplication going on here, and also moved
|
||||
* to &drm_gem_object_funcs.
|
||||
*/
|
||||
int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
|
||||
int (*gem_prime_mmap)(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma);
|
||||
|
||||
/**
|
||||
* @dumb_create:
|
||||
@ -602,6 +598,5 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
|
||||
|
||||
int drm_dev_set_unique(struct drm_device *dev, const char *name);
|
||||
|
||||
extern bool drm_firmware_drivers_only(void);
|
||||
|
||||
#endif
|
||||
|
@ -508,52 +508,6 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
|
||||
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
|
||||
* @vend_chr_0: First character of the vendor string.
|
||||
* @vend_chr_1: Second character of the vendor string.
|
||||
* @vend_chr_2: Third character of the vendor string.
|
||||
* @product_id: The 16-bit product ID.
|
||||
*
|
||||
* This is a macro so that it can be calculated at compile time and used
|
||||
* as an initializer.
|
||||
*
|
||||
* For instance:
|
||||
* drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
|
||||
*
|
||||
* Return: a 32-bit ID per panel.
|
||||
*/
|
||||
#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
|
||||
((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
|
||||
(((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
|
||||
(((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
|
||||
((product_id) & 0xffff))
|
||||
|
||||
/**
|
||||
* drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
|
||||
* @panel_id: The panel ID to decode.
|
||||
* @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
|
||||
* termination
|
||||
* @product_id: The product ID will be returned here.
|
||||
*
|
||||
* For instance, after:
|
||||
* drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
|
||||
* These will be true:
|
||||
* vend[0] = 'B'
|
||||
* vend[1] = 'O'
|
||||
* vend[2] = 'E'
|
||||
* vend[3] = '\0'
|
||||
* product_id = 0x2d08
|
||||
*/
|
||||
static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
|
||||
{
|
||||
*product_id = (u16)(panel_id & 0xffff);
|
||||
vend[0] = '@' + ((panel_id >> 26) & 0x1f);
|
||||
vend[1] = '@' + ((panel_id >> 21) & 0x1f);
|
||||
vend[2] = '@' + ((panel_id >> 16) & 0x1f);
|
||||
vend[3] = '\0';
|
||||
}
|
||||
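A worked example of the encode/decode pair above, using the values from the kernel-doc (illustrative snippet only):

/* 'B' - '@' = 2, 'O' - '@' = 15, 'E' - '@' = 5, so:
 * (2 << 26) | (15 << 21) | (5 << 16) | 0x2d08 == 0x09e52d08 */
u32 panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08);
char vend[4];
u16 product_id;

drm_edid_decode_panel_id(panel_id, vend, &product_id);
/* vend is now "BOE" (NUL-terminated) and product_id is 0x2d08. */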
|
||||
bool drm_probe_ddc(struct i2c_adapter *adapter);
|
||||
struct edid *drm_do_get_edid(struct drm_connector *connector,
|
||||
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
|
||||
@ -561,7 +515,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
|
||||
void *data);
|
||||
struct edid *drm_get_edid(struct drm_connector *connector,
|
||||
struct i2c_adapter *adapter);
|
||||
u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
|
||||
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
|
||||
struct i2c_adapter *adapter);
|
||||
struct edid *drm_edid_duplicate(const struct edid *edid);
|
||||
|
@ -6,41 +6,34 @@
|
||||
#ifndef __LINUX_DRM_FORMAT_HELPER_H
|
||||
#define __LINUX_DRM_FORMAT_HELPER_H
|
||||
|
||||
struct drm_format_info;
|
||||
struct drm_framebuffer;
|
||||
struct drm_rect;
|
||||
|
||||
unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
|
||||
const struct drm_rect *clip);
|
||||
void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip);
|
||||
void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vaddr,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip);
|
||||
void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip, bool cached);
|
||||
void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip, bool swab);
|
||||
void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
|
||||
void *vaddr, struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip, bool swab);
|
||||
void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
|
||||
void *vaddr, struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip);
|
||||
void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
|
||||
struct drm_rect *clip);
|
||||
|
||||
void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip);
|
||||
void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip);
|
||||
void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip,
|
||||
bool cached);
|
||||
void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *vaddr,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip);
|
||||
void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip,
|
||||
bool swab);
|
||||
void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch,
|
||||
const void *vaddr, const struct drm_framebuffer *fb,
|
||||
const struct drm_rect *clip, bool swab);
|
||||
void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip);
|
||||
void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
|
||||
const void *vaddr, const struct drm_framebuffer *fb,
|
||||
const struct drm_rect *clip);
|
||||
void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, unsigned int dst_pitch,
|
||||
const void *vaddr, const struct drm_framebuffer *fb,
|
||||
const struct drm_rect *clip);
|
||||
void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
|
||||
const struct drm_framebuffer *fb, const struct drm_rect *clip);
|
||||
|
||||
int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format,
|
||||
const void *vmap, const struct drm_framebuffer *fb,
|
||||
const struct drm_rect *rect);
|
||||
int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch,
|
||||
uint32_t dst_format, void *vmap,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_rect *rect);
|
||||
int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch,
|
||||
uint32_t dst_format, void *vmap,
|
||||
struct drm_framebuffer *fb);
|
||||
|
||||
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
|
||||
|
@ -22,24 +22,6 @@ int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
|
||||
* Helpers for planes with shadow buffers
|
||||
*/
|
||||
|
||||
/**
|
||||
* DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels
|
||||
*
|
||||
* For drivers with shadow planes, the maximum width of the framebuffer is
|
||||
* usually independent from hardware limitations. Drivers can initialize struct
|
||||
* drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH.
|
||||
*/
|
||||
#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u)
|
||||
|
||||
/**
|
||||
* DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines
|
||||
*
|
||||
* For drivers with shadow planes, the maximum height of the framebuffer is
|
||||
* usually independent from hardware limitations. Drivers can initialize struct
|
||||
* drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT.
|
||||
*/
|
||||
#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u)
|
||||
|
||||
/**
|
||||
* struct drm_shadow_plane_state - plane state for planes with shadow buffers
|
||||
*
|
||||
|
@ -32,108 +32,42 @@ struct drm_gem_cma_object {
|
||||
#define to_drm_gem_cma_obj(gem_obj) \
|
||||
container_of(gem_obj, struct drm_gem_cma_object, base)
|
||||
|
||||
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
|
||||
size_t size);
|
||||
void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
|
||||
void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
|
||||
struct drm_printer *p, unsigned int indent);
|
||||
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
|
||||
int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map);
|
||||
int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
|
||||
|
||||
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
|
||||
|
||||
/*
|
||||
* GEM object functions
|
||||
*/
|
||||
#ifndef CONFIG_MMU
|
||||
#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
|
||||
.get_unmapped_area = drm_gem_cma_get_unmapped_area,
|
||||
#else
|
||||
#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
|
||||
#endif
|
||||
|
||||
/**
|
||||
* drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
|
||||
* @obj: GEM object to free
|
||||
* DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
|
||||
* @name: name for the generated structure
|
||||
*
|
||||
* This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers
|
||||
* should use it as their &drm_gem_object_funcs.free handler.
|
||||
* This macro autogenerates a suitable &struct file_operations for CMA based
|
||||
* drivers, which can be assigned to &drm_driver.fops. Note that this structure
|
||||
* cannot be shared between drivers, because it contains a reference to the
|
||||
* current module using THIS_MODULE.
|
||||
*
|
||||
* Note that the declaration is already marked as static - if you need a
|
||||
* non-static version of this you're probably doing it wrong and will break the
|
||||
* THIS_MODULE reference by accident.
|
||||
*/
|
||||
static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
|
||||
#define DEFINE_DRM_GEM_CMA_FOPS(name) \
|
||||
static const struct file_operations name = {\
|
||||
.owner = THIS_MODULE,\
|
||||
.open = drm_open,\
|
||||
.release = drm_release,\
|
||||
.unlocked_ioctl = drm_ioctl,\
|
||||
.compat_ioctl = drm_compat_ioctl,\
|
||||
.poll = drm_poll,\
|
||||
.read = drm_read,\
|
||||
.llseek = noop_llseek,\
|
||||
.mmap = drm_gem_mmap,\
|
||||
DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
|
||||
}
|
||||
|
||||
drm_gem_cma_free(cma_obj);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs
|
||||
* @p: DRM printer
|
||||
* @indent: Tab indentation level
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers
|
||||
* should use this function as their &drm_gem_object_funcs.print_info handler.
|
||||
*/
|
||||
static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
|
||||
const struct drm_gem_object *obj)
|
||||
{
|
||||
const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
|
||||
|
||||
drm_gem_cma_print_info(cma_obj, p, indent);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table()
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should
|
||||
* use it as their &drm_gem_object_funcs.get_sg_table handler.
|
||||
*
|
||||
* Returns:
|
||||
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
|
||||
*/
|
||||
static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
|
||||
|
||||
return drm_gem_cma_get_sg_table(cma_obj);
|
||||
}
|
||||
|
||||
/*
|
||||
* drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap()
|
||||
* @obj: GEM object
|
||||
* @map: Returns the kernel virtual address of the CMA GEM object's backing store.
|
||||
*
|
||||
* This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should
|
||||
* use it as their &drm_gem_object_funcs.vmap handler.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
|
||||
{
|
||||
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
|
||||
|
||||
return drm_gem_cma_vmap(cma_obj, map);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap()
|
||||
* @obj: GEM object
|
||||
* @vma: VMA for the area to be mapped
|
||||
*
|
||||
* This function wraps drm_gem_cma_mmap(). Drivers that employ the cma helpers should
|
||||
* use it as their &drm_gem_object_funcs.mmap handler.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
|
||||
|
||||
return drm_gem_cma_mmap(cma_obj, vma);
|
||||
}
|
||||
|
||||
/*
|
||||
* Driver ops
|
||||
*/
|
||||
/* free GEM object */
|
||||
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
|
||||
|
||||
/* create memory region for DRM framebuffer */
|
||||
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
|
||||
@ -145,10 +79,30 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *drm,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
||||
/* allocate physical memory */
|
||||
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
|
||||
size_t size);
|
||||
|
||||
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
|
||||
unsigned long addr,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags);
|
||||
#endif
|
||||
|
||||
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
|
||||
const struct drm_gem_object *obj);
|
||||
|
||||
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
|
||||
struct drm_gem_object *
|
||||
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
|
||||
struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt);
|
||||
int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
|
||||
int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
|
||||
|
||||
/**
|
||||
* DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
|
||||
@ -231,47 +185,4 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
|
||||
struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt);
|
||||
|
||||
/*
|
||||
* File ops
|
||||
*/
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
|
||||
unsigned long addr,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags);
|
||||
#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
|
||||
.get_unmapped_area = drm_gem_cma_get_unmapped_area,
|
||||
#else
|
||||
#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
|
||||
#endif
|
||||
|
||||
/**
|
||||
* DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
|
||||
* @name: name for the generated structure
|
||||
*
|
||||
* This macro autogenerates a suitable &struct file_operations for CMA based
|
||||
* drivers, which can be assigned to &drm_driver.fops. Note that this structure
|
||||
* cannot be shared between drivers, because it contains a reference to the
|
||||
* current module using THIS_MODULE.
|
||||
*
|
||||
* Note that the declaration is already marked as static - if you need a
|
||||
* non-static version of this you're probably doing it wrong and will break the
|
||||
* THIS_MODULE reference by accident.
|
||||
*/
|
||||
#define DEFINE_DRM_GEM_CMA_FOPS(name) \
|
||||
static const struct file_operations name = {\
|
||||
.owner = THIS_MODULE,\
|
||||
.open = drm_open,\
|
||||
.release = drm_release,\
|
||||
.unlocked_ioctl = drm_ioctl,\
|
||||
.compat_ioctl = drm_compat_ioctl,\
|
||||
.poll = drm_poll,\
|
||||
.read = drm_read,\
|
||||
.llseek = noop_llseek,\
|
||||
.mmap = drm_gem_mmap,\
|
||||
DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
|
||||
}
|
||||
|
||||
#endif /* __DRM_GEM_CMA_HELPER_H__ */
|
||||
|
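Both sides of the CMA helper hunk above keep DEFINE_DRM_GEM_CMA_FOPS() and the DRM_GEM_CMA_DRIVER_OPS* macros. A hedged sketch of how a driver typically wires them together (driver name, description and feature flags are invented for illustration):

DEFINE_DRM_GEM_CMA_FOPS(example_fops);

static const struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        /* Hooks up the CMA dumb-buffer and PRIME helpers. */
        DRM_GEM_CMA_DRIVER_OPS,
        .fops  = &example_fops,
        .name  = "example",
        .desc  = "Example CMA-backed KMS driver",
        .date  = "20220101",
        .major = 1,
        .minor = 0,
};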
@ -107,17 +107,16 @@ struct drm_gem_shmem_object {
|
||||
container_of(obj, struct drm_gem_shmem_object, base)
|
||||
|
||||
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
|
||||
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
|
||||
void drm_gem_shmem_free_object(struct drm_gem_object *obj);
|
||||
|
||||
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
|
||||
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
|
||||
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
|
||||
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
|
||||
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map);
|
||||
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map);
|
||||
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
|
||||
int drm_gem_shmem_pin(struct drm_gem_object *obj);
|
||||
void drm_gem_shmem_unpin(struct drm_gem_object *obj);
|
||||
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
|
||||
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
|
||||
|
||||
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
|
||||
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
|
||||
|
||||
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
|
||||
{
|
||||
@ -126,156 +125,29 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
|
||||
!shmem->base.dma_buf && !shmem->base.import_attach;
|
||||
}
|
||||
|
||||
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
|
||||
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
|
||||
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
|
||||
bool drm_gem_shmem_purge(struct drm_gem_object *obj);
|
||||
|
||||
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
|
||||
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
|
||||
struct drm_gem_shmem_object *
|
||||
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
|
||||
struct drm_device *dev, size_t size,
|
||||
uint32_t *handle);
|
||||
|
||||
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
|
||||
struct drm_printer *p, unsigned int indent);
|
||||
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
||||
/*
|
||||
* GEM object functions
|
||||
*/
|
||||
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
|
||||
* @obj: GEM object to free
|
||||
*
|
||||
* This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
|
||||
* should use it as their &drm_gem_object_funcs.free handler.
|
||||
*/
|
||||
static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
drm_gem_shmem_free(shmem);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
|
||||
* @p: DRM printer
|
||||
* @indent: Tab indentation level
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
|
||||
* use this function as their &drm_gem_object_funcs.print_info handler.
|
||||
*/
|
||||
static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
|
||||
const struct drm_gem_object *obj)
|
||||
{
|
||||
const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
drm_gem_shmem_print_info(shmem, p, indent);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
|
||||
* use it as their &drm_gem_object_funcs.pin handler.
|
||||
*/
|
||||
static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
return drm_gem_shmem_pin(shmem);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
|
||||
* use it as their &drm_gem_object_funcs.unpin handler.
|
||||
*/
|
||||
static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
drm_gem_shmem_unpin(shmem);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
|
||||
* @obj: GEM object
|
||||
*
|
||||
* This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
|
||||
* use it as their &drm_gem_object_funcs.get_sg_table handler.
|
||||
*
|
||||
* Returns:
|
||||
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
|
||||
*/
|
||||
static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
return drm_gem_shmem_get_sg_table(shmem);
|
||||
}
|
||||
|
||||
/*
|
||||
* drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
|
||||
* @obj: GEM object
|
||||
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
|
||||
*
|
||||
* This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
|
||||
* use it as their &drm_gem_object_funcs.vmap handler.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
return drm_gem_shmem_vmap(shmem, map);
|
||||
}
|
||||
|
||||
/*
|
||||
* drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
|
||||
* @obj: GEM object
|
||||
* @map: Kernel virtual address where the SHMEM GEM object was mapped
|
||||
*
|
||||
* This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
|
||||
* use it as their &drm_gem_object_funcs.vunmap handler.
|
||||
*/
|
||||
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
|
||||
|
||||
drm_gem_shmem_vunmap(shmem, map);
|
||||
}
|
||||
|
||||
/**
 * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_mmap(shmem, vma);
}
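A driver that relies on these wrappers usually collects them in a single &drm_gem_object_funcs table and points every shmem-backed GEM object at it. A minimal sketch; the table name is a hypothetical placeholder.

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

static const struct drm_gem_object_funcs my_shmem_gem_funcs = {
	/* All handlers are the inline wrappers defined in this header. */
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};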

/*
 * Driver ops
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj);

struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
|
||||
|
@ -3,7 +3,7 @@
|
||||
#ifndef DRM_GEM_TTM_HELPER_H
|
||||
#define DRM_GEM_TTM_HELPER_H
|
||||
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include <drm/drm_device.h>
|
||||
#include <drm/drm_gem.h>
|
||||
|
@ -11,8 +11,8 @@
|
||||
#include <drm/ttm/ttm_bo_api.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/dma-buf-map.h>
|
||||
#include <linux/kernel.h> /* for container_of() */
|
||||
|
||||
struct drm_mode_create_dumb;
|
||||
struct drm_plane;
|
||||
|
@ -49,17 +49,17 @@ struct drm_open_hash {
|
||||
u8 order;
|
||||
};
|
||||
|
||||
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
|
||||
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
|
||||
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
|
||||
unsigned long seed, int bits, int shift,
|
||||
unsigned long add);
|
||||
extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
|
||||
int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
|
||||
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
|
||||
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
|
||||
unsigned long seed, int bits, int shift,
|
||||
unsigned long add);
|
||||
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
|
||||
|
||||
extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
|
||||
extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
|
||||
extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
|
||||
extern void drm_ht_remove(struct drm_open_hash *ht);
|
||||
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
|
||||
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
|
||||
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
|
||||
void drm_ht_remove(struct drm_open_hash *ht);
|
||||
|
||||
/*
|
||||
* RCU-safe interface
|
||||
|
@ -167,6 +167,7 @@ struct drm_ioctl_desc {
|
||||
.name = #ioctl \
|
||||
}
|
||||
|
||||
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
|
||||
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
|
||||
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
@ -37,6 +37,7 @@
|
||||
|
||||
#include <drm/drm.h>
|
||||
#include <drm/drm_auth.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
|
||||
struct drm_device;
|
||||
struct drm_driver;
|
||||
@ -50,20 +51,6 @@ struct pci_driver;
|
||||
* you're doing it terribly wrong.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Hash-table Support
|
||||
*/
|
||||
|
||||
struct drm_hash_item {
|
||||
struct hlist_node head;
|
||||
unsigned long key;
|
||||
};
|
||||
|
||||
struct drm_open_hash {
|
||||
struct hlist_head *table;
|
||||
u8 order;
|
||||
};
|
||||
|
||||
/**
|
||||
* DMA buffer.
|
||||
*/
|
||||
|
@ -241,13 +241,9 @@ struct mipi_dsi_device *
|
||||
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
|
||||
const struct mipi_dsi_device_info *info);
|
||||
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
|
||||
struct mipi_dsi_device *
|
||||
devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
|
||||
const struct mipi_dsi_device_info *info);
|
||||
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
|
||||
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
|
||||
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
|
||||
int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
|
||||
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
|
||||
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
|
||||
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
|
||||
|
@ -39,15 +39,13 @@
|
||||
*/
|
||||
#include <linux/bug.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#ifdef CONFIG_DRM_DEBUG_MM
|
||||
#include <linux/stackdepot.h>
|
||||
#endif
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
#ifdef CONFIG_DRM_DEBUG_MM
|
||||
|
@ -103,13 +103,14 @@ struct drm_mode_config_funcs {
|
||||
 * Callback used by helpers to inform the driver of output configuration
 * changes.
 *
 * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
 * to call this hook to inform the fbdev helper of output changes.
 * Drivers implementing fbdev emulation with the helpers can call
 * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
 * helper of output changes.
 *
 * This hook is deprecated, drivers should instead use
 * drm_fbdev_generic_setup() which takes care of any necessary
 * hotplug event forwarding already without further involvement by
 * the driver.
 * FIXME:
 *
 * Except that there's no vtable for device-level helper callbacks
 * there's no reason this is a core function.
 */
void (*output_poll_changed)(struct drm_device *dev);
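A hedged sketch of the recommended replacement mentioned above: register the device and let drm_fbdev_generic_setup() do the fbdev hotplug forwarding rather than implementing this hook. The function name and bpp choice are illustrative only.

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

/* Hypothetical tail of a KMS driver's probe path; "drm" is its drm_device. */
static int my_kms_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/*
	 * Generic fbdev emulation: hotplug events are forwarded internally,
	 * so no .output_poll_changed implementation is required.
	 */
	drm_fbdev_generic_setup(drm, 32);
	return 0;
}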
|
||||
|
||||
@ -359,19 +360,6 @@ struct drm_mode_config_funcs {
|
||||
* Core mode resource tracking structure. All CRTC, encoders, and connectors
|
||||
* enumerated by the driver are added here, as are global properties. Some
|
||||
* global restrictions are also here, e.g. dimension restrictions.
|
||||
*
|
||||
* Framebuffer sizes refer to the virtual screen that can be displayed by
|
||||
* the CRTC. This can be different from the physical resolution programmed.
|
||||
* The minimum width and height, stored in @min_width and @min_height,
|
||||
* describe the smallest size of the framebuffer. It correlates to the
|
||||
* minimum programmable resolution.
|
||||
* The maximum width, stored in @max_width, is typically limited by the
|
||||
* maximum pitch between two adjacent scanlines. The maximum height, stored
|
||||
* in @max_height, is usually only limited by the amount of addressable video
|
||||
* memory. For hardware that has no real maximum, drivers should pick a
|
||||
* reasonable default.
|
||||
*
|
||||
* See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT.
|
||||
*/
|
||||
struct drm_mode_config {
|
||||
/**
|
||||
|
@ -24,8 +24,6 @@
|
||||
#ifndef DRM_MODESET_LOCK_H_
|
||||
#define DRM_MODESET_LOCK_H_
|
||||
|
||||
#include <linux/types.h> /* stackdepot.h is not self-contained */
|
||||
#include <linux/stackdepot.h>
|
||||
#include <linux/ww_mutex.h>
|
||||
|
||||
struct drm_modeset_lock;
|
||||
@ -53,12 +51,6 @@ struct drm_modeset_acquire_ctx {
|
||||
*/
|
||||
struct drm_modeset_lock *contended;
|
||||
|
||||
/*
|
||||
* Stack depot for debugging when a contended lock was not backed off
|
||||
* from.
|
||||
*/
|
||||
depot_stack_handle_t stack_depot;
|
||||
|
||||
/*
|
||||
* list of held locks (drm_modeset_lock)
|
||||
*/
|
||||
|
@ -49,7 +49,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
|
||||
struct drm_bridge **bridge);
|
||||
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
|
||||
const struct device_node *port2);
|
||||
int drm_of_lvds_get_data_mapping(const struct device_node *port);
|
||||
#else
|
||||
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
|
||||
struct device_node *port)
|
||||
@ -99,12 +98,6 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int
|
||||
drm_of_lvds_get_data_mapping(const struct device_node *port)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -24,6 +24,7 @@
|
||||
#ifndef __DRM_PANEL_H__
|
||||
#define __DRM_PANEL_H__
|
||||
|
||||
#include <drm/drm_connector.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/list.h>
|
||||
@ -35,8 +36,6 @@ struct drm_device;
|
||||
struct drm_panel;
|
||||
struct display_timing;
|
||||
|
||||
enum drm_panel_orientation;
|
||||
|
||||
/**
|
||||
* struct drm_panel_funcs - perform operations on a given panel
|
||||
*
|
||||
@ -179,6 +178,16 @@ struct drm_panel {
|
||||
* Panel entry in registry.
|
||||
*/
|
||||
struct list_head list;
|
||||
|
||||
/**
 * @prepare_upstream_first:
 *
 * The upstream controller should be prepared first, before the prepare
 * for the panel is called. This is largely required for DSI panels
 * where the DSI host controller should be initialised to LP-11 before
 * the panel is powered up.
 */
bool prepare_upstream_first;
};
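A hedged sketch of how a DSI panel driver might opt in to this ordering; the wrapper name and the funcs table are placeholders, only drm_panel_init(), drm_panel_add() and the new flag come from the panel API.

#include <drm/drm_panel.h>

static void my_panel_register(struct drm_panel *panel, struct device *dev,
			      const struct drm_panel_funcs *funcs)
{
	drm_panel_init(panel, dev, funcs, DRM_MODE_CONNECTOR_DSI);
	/* Ask the core to bring the DSI host to LP-11 before powering up. */
	panel->prepare_upstream_first = true;
	drm_panel_add(panel);
}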
|
||||
|
||||
void drm_panel_init(struct drm_panel *panel, struct device *dev,
|
||||
|
@ -43,7 +43,7 @@ enum drm_scaling_filter {
|
||||
/**
|
||||
* struct drm_plane_state - mutable plane state
|
||||
*
|
||||
* Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
|
||||
* Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
|
||||
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
|
||||
* raw coordinates provided by userspace. Drivers should use
|
||||
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
|
||||
|
@ -340,8 +340,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
/**
|
||||
* DRM_DEV_ERROR() - Error output.
|
||||
*
|
||||
* NOTE: this is deprecated in favor of drm_err() or dev_err().
|
||||
*
|
||||
* @dev: device pointer
|
||||
* @fmt: printf() like format string.
|
||||
*/
|
||||
@ -351,9 +349,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
/**
|
||||
* DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
|
||||
*
|
||||
* NOTE: this is deprecated in favor of drm_err_ratelimited() or
|
||||
* dev_err_ratelimited().
|
||||
*
|
||||
* @dev: device pointer
|
||||
* @fmt: printf() like format string.
|
||||
*
|
||||
@ -369,11 +364,9 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
|
||||
})
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
|
||||
#define DRM_DEV_INFO(dev, fmt, ...) \
|
||||
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
|
||||
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
|
||||
({ \
|
||||
static bool __print_once __read_mostly; \
|
||||
@ -386,8 +379,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
/**
|
||||
* DRM_DEV_DEBUG() - Debug output for generic drm code
|
||||
*
|
||||
* NOTE: this is deprecated in favor of drm_dbg_core().
|
||||
*
|
||||
* @dev: device pointer
|
||||
* @fmt: printf() like format string.
|
||||
*/
|
||||
@ -396,8 +387,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
/**
|
||||
* DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
|
||||
*
|
||||
* NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
|
||||
*
|
||||
* @dev: device pointer
|
||||
* @fmt: printf() like format string.
|
||||
*/
|
||||
@ -406,8 +395,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
|
||||
/**
|
||||
* DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
|
||||
*
|
||||
* NOTE: this is deprecated in favor of drm_dbg_kms().
|
||||
*
|
||||
* @dev: device pointer
|
||||
* @fmt: printf() like format string.
|
||||
*/
|
||||
@ -493,63 +480,47 @@ void __drm_err(const char *format, ...);
|
||||
#define _DRM_PRINTK(once, level, fmt, ...) \
|
||||
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of pr_info(). */
|
||||
#define DRM_INFO(fmt, ...) \
|
||||
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
|
||||
/* NOTE: this is deprecated in favor of pr_notice(). */
|
||||
#define DRM_NOTE(fmt, ...) \
|
||||
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
|
||||
/* NOTE: this is deprecated in favor of pr_warn(). */
|
||||
#define DRM_WARN(fmt, ...) \
|
||||
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of pr_info_once(). */
|
||||
#define DRM_INFO_ONCE(fmt, ...) \
|
||||
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
|
||||
/* NOTE: this is deprecated in favor of pr_notice_once(). */
|
||||
#define DRM_NOTE_ONCE(fmt, ...) \
|
||||
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
|
||||
/* NOTE: this is deprecated in favor of pr_warn_once(). */
|
||||
#define DRM_WARN_ONCE(fmt, ...) \
|
||||
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of pr_err(). */
|
||||
#define DRM_ERROR(fmt, ...) \
|
||||
__drm_err(fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
|
||||
#define DRM_ERROR_RATELIMITED(fmt, ...) \
|
||||
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
|
||||
#define DRM_DEBUG(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
|
||||
#define DRM_DEBUG_DRIVER(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
|
||||
#define DRM_DEBUG_KMS(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
|
||||
#define DRM_DEBUG_PRIME(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
|
||||
#define DRM_DEBUG_ATOMIC(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
|
||||
#define DRM_DEBUG_VBL(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
|
||||
#define DRM_DEBUG_LEASE(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
|
||||
#define DRM_DEBUG_DP(fmt, ...) \
|
||||
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
|
||||
|
||||
@ -565,7 +536,6 @@ void __drm_err(const char *format, ...);
|
||||
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
|
||||
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
|
||||
|
||||
/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
|
||||
#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
|
||||
|
||||
/*
|
||||
|
@ -20,7 +20,6 @@ void drm_kms_helper_poll_fini(struct drm_device *dev);
|
||||
bool drm_helper_hpd_irq_event(struct drm_device *dev);
|
||||
bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
|
||||
void drm_kms_helper_hotplug_event(struct drm_device *dev);
|
||||
void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
|
||||
|
||||
void drm_kms_helper_poll_disable(struct drm_device *dev);
|
||||
void drm_kms_helper_poll_enable(struct drm_device *dev);
|
||||
|
@ -11,7 +11,6 @@ int drm_class_device_register(struct device *dev);
|
||||
void drm_class_device_unregister(struct device *dev);
|
||||
|
||||
void drm_sysfs_hotplug_event(struct drm_device *dev);
|
||||
void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
|
||||
void drm_sysfs_connector_status_event(struct drm_connector *connector,
|
||||
struct drm_property *property);
|
||||
#endif
|
||||
|
@ -27,13 +27,9 @@
|
||||
#include <drm/spsc_queue.h>
|
||||
#include <linux/dma-fence.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/xarray.h>
|
||||
#include <linux/irq_work.h>
|
||||
|
||||
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
|
||||
|
||||
struct drm_gem_object;
|
||||
|
||||
struct drm_gpu_scheduler;
|
||||
struct drm_sched_rq;
|
||||
|
||||
@ -54,147 +50,56 @@ enum drm_sched_priority {
|
||||
* struct drm_sched_entity - A wrapper around a job queue (typically
|
||||
* attached to the DRM file_priv).
|
||||
*
|
||||
* @list: used to append this struct to the list of entities in the
|
||||
* runqueue.
|
||||
* @rq: runqueue on which this entity is currently scheduled.
|
||||
* @sched_list: A list of schedulers (drm_gpu_schedulers).
|
||||
* Jobs from this entity can be scheduled on any scheduler
|
||||
* on this list.
|
||||
* @num_sched_list: number of drm_gpu_schedulers in the sched_list.
|
||||
* @priority: priority of the entity
|
||||
* @rq_lock: lock to modify the runqueue to which this entity belongs.
|
||||
* @job_queue: the list of jobs of this entity.
|
||||
* @fence_seq: a linearly increasing seqno incremented with each
|
||||
* new &drm_sched_fence which is part of the entity.
|
||||
* @fence_context: a unique context for all the fences which belong
|
||||
* to this entity.
|
||||
* The &drm_sched_fence.scheduled uses the
|
||||
* fence_context but &drm_sched_fence.finished uses
|
||||
* fence_context + 1.
|
||||
* @dependency: the dependency fence of the job which is on the top
|
||||
* of the job queue.
|
||||
* @cb: callback for the dependency fence above.
|
||||
* @guilty: points to ctx's guilty.
|
||||
* @fini_status: contains the exit status in case the process was signalled.
|
||||
* @last_scheduled: points to the finished fence of the last scheduled job.
|
||||
* @last_user: last group leader pushing a job into the entity.
|
||||
 * @stopped: Marks the entity as removed from rq and destined for termination.
 * @entity_idle: Signals when entity is not in use
|
||||
*
|
||||
* Entities will emit jobs in order to their corresponding hardware
|
||||
* ring, and the scheduler will alternate between entities based on
|
||||
* scheduling policy.
|
||||
*/
|
||||
struct drm_sched_entity {
|
||||
/**
|
||||
* @list:
|
||||
*
|
||||
* Used to append this struct to the list of entities in the runqueue
|
||||
* @rq under &drm_sched_rq.entities.
|
||||
*
|
||||
* Protected by &drm_sched_rq.lock of @rq.
|
||||
*/
|
||||
struct list_head list;
|
||||
|
||||
/**
|
||||
* @rq:
|
||||
*
|
||||
* Runqueue on which this entity is currently scheduled.
|
||||
*
|
||||
* FIXME: Locking is very unclear for this. Writers are protected by
|
||||
* @rq_lock, but readers are generally lockless and seem to just race
|
||||
* with not even a READ_ONCE.
|
||||
*/
|
||||
struct drm_sched_rq *rq;
|
||||
|
||||
/**
|
||||
* @sched_list:
|
||||
*
|
||||
* A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
|
||||
* be scheduled on any scheduler on this list.
|
||||
*
|
||||
* This can be modified by calling drm_sched_entity_modify_sched().
|
||||
* Locking is entirely up to the driver, see the above function for more
|
||||
* details.
|
||||
*
|
||||
* This will be set to NULL if &num_sched_list equals 1 and @rq has been
|
||||
* set already.
|
||||
*
|
||||
* FIXME: This means priority changes through
|
||||
* drm_sched_entity_set_priority() will be lost henceforth in this case.
|
||||
*/
|
||||
struct drm_gpu_scheduler **sched_list;
|
||||
|
||||
/**
|
||||
* @num_sched_list:
|
||||
*
|
||||
* Number of drm_gpu_schedulers in the @sched_list.
|
||||
*/
|
||||
unsigned int num_sched_list;
|
||||
|
||||
/**
|
||||
* @priority:
|
||||
*
|
||||
* Priority of the entity. This can be modified by calling
|
||||
* drm_sched_entity_set_priority(). Protected by &rq_lock.
|
||||
*/
|
||||
enum drm_sched_priority priority;
|
||||
|
||||
/**
|
||||
* @rq_lock:
|
||||
*
|
||||
* Lock to modify the runqueue to which this entity belongs.
|
||||
*/
|
||||
spinlock_t rq_lock;
|
||||
|
||||
/**
|
||||
* @job_queue: the list of jobs of this entity.
|
||||
*/
|
||||
struct spsc_queue job_queue;
|
||||
|
||||
/**
|
||||
* @fence_seq:
|
||||
*
|
||||
* A linearly increasing seqno incremented with each new
|
||||
* &drm_sched_fence which is part of the entity.
|
||||
*
|
||||
* FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
|
||||
* this doesn't need to be atomic.
|
||||
*/
|
||||
atomic_t fence_seq;
|
||||
|
||||
/**
|
||||
* @fence_context:
|
||||
*
|
||||
* A unique context for all the fences which belong to this entity. The
|
||||
* &drm_sched_fence.scheduled uses the fence_context but
|
||||
* &drm_sched_fence.finished uses fence_context + 1.
|
||||
*/
|
||||
uint64_t fence_context;
|
||||
|
||||
/**
|
||||
* @dependency:
|
||||
*
|
||||
* The dependency fence of the job which is on the top of the job queue.
|
||||
*/
|
||||
struct dma_fence *dependency;
|
||||
|
||||
/**
|
||||
* @cb:
|
||||
*
|
||||
* Callback for the dependency fence above.
|
||||
*/
|
||||
struct dma_fence_cb cb;
|
||||
|
||||
/**
|
||||
* @guilty:
|
||||
*
|
||||
* Points to entities' guilty.
|
||||
*/
|
||||
atomic_t *guilty;
|
||||
|
||||
/**
|
||||
* @last_scheduled:
|
||||
*
|
||||
* Points to the finished fence of the last scheduled job. Only written
|
||||
* by the scheduler thread, can be accessed locklessly from
|
||||
* drm_sched_job_arm() iff the queue is empty.
|
||||
*/
|
||||
struct dma_fence *last_scheduled;
|
||||
|
||||
/**
|
||||
* @last_user: last group leader pushing a job into the entity.
|
||||
*/
|
||||
struct task_struct *last_user;
|
||||
|
||||
/**
|
||||
* @stopped:
|
||||
*
|
||||
 * Marks the entity as removed from rq and destined for
|
||||
* termination. This is set by calling drm_sched_entity_flush() and by
|
||||
* drm_sched_fini().
|
||||
*/
|
||||
bool stopped;
|
||||
|
||||
/**
|
||||
* @entity_idle:
|
||||
*
|
||||
* Signals when entity is not in use, used to sequence entity cleanup in
|
||||
* drm_sched_entity_fini().
|
||||
*/
|
||||
struct completion entity_idle;
|
||||
};
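A minimal sketch of creating such an entity against a single scheduler, assuming the usual five-argument drm_sched_entity_init() prototype declared elsewhere in this header; the wrapper name is hypothetical.

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

static int my_entity_setup(struct drm_sched_entity *entity,
			   struct drm_gpu_scheduler *sched)
{
	/* A one-element scheduler list is the common single-ring case. */
	struct drm_gpu_scheduler *sched_list[] = { sched };

	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list), NULL);
}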
|
||||
|
||||
@ -287,32 +192,12 @@ struct drm_sched_job {
|
||||
struct list_head list;
|
||||
struct drm_gpu_scheduler *sched;
|
||||
struct drm_sched_fence *s_fence;
|
||||
|
||||
/*
|
||||
* work is used only after finish_cb has been used and will not be
|
||||
* accessed anymore.
|
||||
*/
|
||||
union {
|
||||
struct dma_fence_cb finish_cb;
|
||||
struct irq_work work;
|
||||
};
|
||||
|
||||
struct dma_fence_cb finish_cb;
|
||||
uint64_t id;
|
||||
atomic_t karma;
|
||||
enum drm_sched_priority s_priority;
|
||||
struct drm_sched_entity *entity;
|
||||
struct dma_fence_cb cb;
|
||||
/**
|
||||
* @dependencies:
|
||||
*
|
||||
* Contains the dependencies as struct dma_fence for this job, see
|
||||
* drm_sched_job_add_dependency() and
|
||||
* drm_sched_job_add_implicit_dependencies().
|
||||
*/
|
||||
struct xarray dependencies;
|
||||
|
||||
/** @last_dependency: tracks @dependencies as they signal */
|
||||
unsigned long last_dependency;
|
||||
};
|
||||
|
||||
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
|
||||
@ -335,15 +220,9 @@ enum drm_gpu_sched_stat {
|
||||
*/
|
||||
struct drm_sched_backend_ops {
|
||||
/**
|
||||
* @dependency:
|
||||
*
|
||||
* Called when the scheduler is considering scheduling this job next, to
|
||||
* get another struct dma_fence for this job to block on. Once it
|
||||
* returns NULL, run_job() may be called.
|
||||
*
|
||||
* If a driver exclusively uses drm_sched_job_add_dependency() and
|
||||
 * drm_sched_job_add_implicit_dependencies() this can be omitted and
|
||||
* left as NULL.
|
||||
* @dependency: Called when the scheduler is considering scheduling
|
||||
* this job next, to get another struct dma_fence for this job to
|
||||
* block on. Once it returns NULL, run_job() may be called.
|
||||
*/
|
||||
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *s_entity);
|
||||
@ -469,14 +348,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
|
||||
int drm_sched_job_init(struct drm_sched_job *job,
|
||||
struct drm_sched_entity *entity,
|
||||
void *owner);
|
||||
void drm_sched_job_arm(struct drm_sched_job *job);
|
||||
int drm_sched_job_add_dependency(struct drm_sched_job *job,
|
||||
struct dma_fence *fence);
|
||||
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
|
||||
struct drm_gem_object *obj,
|
||||
bool write);
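A hedged sketch of a submit path built from the declarations above. It assumes the newer scheduler API shown in these hunks (drm_sched_job_arm(), the dependency helpers and a single-argument drm_sched_entity_push_job()); which variant a tree actually carries depends on which side of this diff is kept, and struct my_job plus the error handling are illustrative.

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

struct my_job {
	struct drm_sched_job base;
	/* driver-specific payload would live here */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
		     struct drm_gem_object *bo)
{
	int ret;

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret)
		return ret;

	/* Block on previous users of the BO (implicit sync). */
	ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
	if (ret)
		goto err_cleanup;

	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);
	return 0;

err_cleanup:
	drm_sched_job_cleanup(&job->base);
	return ret;
}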
|
||||
|
||||
|
||||
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
|
||||
struct drm_gpu_scheduler **sched_list,
|
||||
unsigned int num_sched_list);
|
||||
@ -510,17 +381,14 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
|
||||
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
|
||||
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
|
||||
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
|
||||
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
|
||||
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *entity);
|
||||
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
|
||||
enum drm_sched_priority priority);
|
||||
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
|
||||
|
||||
struct drm_sched_fence *drm_sched_fence_alloc(
|
||||
struct drm_sched_fence *drm_sched_fence_create(
|
||||
struct drm_sched_entity *s_entity, void *owner);
|
||||
void drm_sched_fence_init(struct drm_sched_fence *fence,
|
||||
struct drm_sched_entity *entity);
|
||||
void drm_sched_fence_free(struct drm_sched_fence *fence);
|
||||
|
||||
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
|
||||
void drm_sched_fence_finished(struct drm_sched_fence *fence);
|
||||
|
||||
|
@ -246,12 +246,10 @@ struct gud_state_req {
|
||||
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
|
||||
#define GUD_REQ_GET_FORMATS 0x40
|
||||
#define GUD_FORMATS_MAX_NUM 32
|
||||
#define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
|
||||
#define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
|
||||
/* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
|
||||
#define GUD_PIXEL_FORMAT_R1 0x01
|
||||
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
|
||||
#define GUD_PIXEL_FORMAT_RGB332 0x30
|
||||
#define GUD_PIXEL_FORMAT_RGB565 0x40
|
||||
#define GUD_PIXEL_FORMAT_RGB888 0x50
|
||||
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
|
||||
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
|
||||
|
||||
|
@ -29,7 +29,6 @@
|
||||
enum i915_component_type {
|
||||
I915_COMPONENT_AUDIO = 1,
|
||||
I915_COMPONENT_HDCP,
|
||||
I915_COMPONENT_PXP
|
||||
};
|
||||
|
||||
/* MAX_PORT is the number of port
|
||||
|
@ -632,16 +632,18 @@
|
||||
INTEL_VGA_DEVICE(0x4905, info), \
|
||||
INTEL_VGA_DEVICE(0x4906, info), \
|
||||
INTEL_VGA_DEVICE(0x4907, info), \
|
||||
INTEL_VGA_DEVICE(0x4908, info), \
|
||||
INTEL_VGA_DEVICE(0x4909, info)
|
||||
INTEL_VGA_DEVICE(0x4908, info)
|
||||
|
||||
/* ADL-S */
|
||||
#define INTEL_ADLS_IDS(info) \
|
||||
INTEL_VGA_DEVICE(0x4680, info), \
|
||||
INTEL_VGA_DEVICE(0x4681, info), \
|
||||
INTEL_VGA_DEVICE(0x4682, info), \
|
||||
INTEL_VGA_DEVICE(0x4683, info), \
|
||||
INTEL_VGA_DEVICE(0x4688, info), \
|
||||
INTEL_VGA_DEVICE(0x468A, info), \
|
||||
INTEL_VGA_DEVICE(0x4689, info), \
|
||||
INTEL_VGA_DEVICE(0x4690, info), \
|
||||
INTEL_VGA_DEVICE(0x4691, info), \
|
||||
INTEL_VGA_DEVICE(0x4692, info), \
|
||||
INTEL_VGA_DEVICE(0x4693, info)
|
||||
|
||||
@ -666,13 +668,4 @@
|
||||
INTEL_VGA_DEVICE(0x46C2, info), \
|
||||
INTEL_VGA_DEVICE(0x46C3, info)
|
||||
|
||||
/* RPL-S */
|
||||
#define INTEL_RPLS_IDS(info) \
|
||||
INTEL_VGA_DEVICE(0xA780, info), \
|
||||
INTEL_VGA_DEVICE(0xA781, info), \
|
||||
INTEL_VGA_DEVICE(0xA782, info), \
|
||||
INTEL_VGA_DEVICE(0xA783, info), \
|
||||
INTEL_VGA_DEVICE(0xA788, info), \
|
||||
INTEL_VGA_DEVICE(0xA789, info)
|
||||
|
||||
#endif /* _I915_PCIIDS_H */
|
||||
|
@ -4,11 +4,9 @@
|
||||
#ifndef _DRM_INTEL_GTT_H
|
||||
#define _DRM_INTEL_GTT_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct agp_bridge_data;
|
||||
struct pci_dev;
|
||||
struct sg_table;
|
||||
#include <linux/agp_backend.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
void intel_gtt_get(u64 *gtt_total,
|
||||
phys_addr_t *mappable_base,
|
||||
|
@ -32,6 +32,7 @@
|
||||
#define _TTM_BO_API_H_
|
||||
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
#include <drm/drm_vma_manager.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/list.h>
|
||||
@ -263,6 +264,18 @@ static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_opera
|
||||
return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
|
||||
*
|
||||
* @placement: Return immediately if buffer is busy.
|
||||
* @mem: The struct ttm_resource indicating the region where the bo resides
|
||||
* @new_flags: Describes compatible placement found
|
||||
*
|
||||
* Returns true if the placement is compatible
|
||||
*/
|
||||
bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem,
|
||||
uint32_t *new_flags);
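A hedged sketch of the usual pattern around this helper: skip revalidation when the BO's current resource already satisfies the requested placement. It assumes the bo->resource field of struct ttm_buffer_object; the function name is illustrative.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static int my_pin_bo(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_operation_ctx *ctx)
{
	uint32_t new_flags;

	/* Already in a compatible place? Nothing to move. */
	if (ttm_bo_mem_compat(placement, bo->resource, &new_flags))
		return 0;

	return ttm_bo_validate(bo, placement, ctx);
}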
|
||||
|
||||
/**
|
||||
* ttm_bo_validate
|
||||
*
|
||||
@ -350,10 +363,9 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
|
||||
* @bo: Pointer to a ttm_buffer_object to be initialized.
|
||||
* @size: Requested size of buffer object.
|
||||
* @type: Requested type of buffer object.
|
||||
* @placement: Initial placement for buffer object.
|
||||
* @flags: Initial placement flags.
|
||||
* @page_alignment: Data alignment in pages.
|
||||
* @ctx: TTM operation context for memory allocation.
|
||||
* @sg: Scatter-gather table.
|
||||
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
|
||||
* @destroy: Destroy function. Use NULL for kfree().
|
||||
*
|
||||
@ -394,7 +406,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
|
||||
* @bo: Pointer to a ttm_buffer_object to be initialized.
|
||||
* @size: Requested size of buffer object.
|
||||
* @type: Requested type of buffer object.
|
||||
* @placement: Initial placement for buffer object.
|
||||
* @flags: Initial placement flags.
|
||||
* @page_alignment: Data alignment in pages.
|
||||
* @interruptible: If needing to sleep to wait for GPU resources,
|
||||
* sleep interruptible.
|
||||
@ -402,7 +414,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
|
||||
* holds a pointer to a persistent shmem object. Typically, this would
|
||||
* point to the shmem object backing a GEM object if TTM is used to back a
|
||||
* GEM user interface.
|
||||
* @sg: Scatter-gather table.
|
||||
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
|
||||
* @destroy: Destroy function. Use NULL for kfree().
|
||||
*
|
||||
|
@ -322,7 +322,7 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
|
||||
*/
|
||||
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
|
||||
|
||||
void ttm_move_memcpy(bool clear,
|
||||
void ttm_move_memcpy(struct ttm_buffer_object *bo,
|
||||
u32 num_pages,
|
||||
struct ttm_kmap_iter *dst_iter,
|
||||
struct ttm_kmap_iter *src_iter);
|
||||
|
@ -27,26 +27,9 @@
|
||||
|
||||
#define TTM_NUM_CACHING_TYPES 3
|
||||
|
||||
/**
|
||||
* enum ttm_caching - CPU caching and BUS snooping behavior.
|
||||
*/
|
||||
enum ttm_caching {
|
||||
/**
|
||||
* @ttm_uncached: Most defensive option for device mappings,
|
||||
* don't even allow write combining.
|
||||
*/
|
||||
ttm_uncached,
|
||||
|
||||
/**
|
||||
* @ttm_write_combined: Don't cache read accesses, but allow at least
|
||||
* writes to be combined.
|
||||
*/
|
||||
ttm_write_combined,
|
||||
|
||||
/**
|
||||
* @ttm_cached: Fully cached like normal system memory, requires that
|
||||
* devices snoop the CPU cache on accesses.
|
||||
*/
|
||||
ttm_cached
|
||||
};
|
||||
|
||||
|
@ -39,23 +39,31 @@ struct ttm_operation_ctx;
|
||||
|
||||
/**
|
||||
* struct ttm_global - Buffer object driver global data.
|
||||
*
|
||||
* @dummy_read_page: Pointer to a dummy page used for mapping requests
|
||||
* of unpopulated pages.
|
||||
* @shrink: A shrink callback object used for buffer object swap.
|
||||
* @device_list_mutex: Mutex protecting the device list.
|
||||
* This mutex is held while traversing the device list for pm options.
|
||||
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
|
||||
* @device_list: List of buffer object devices.
|
||||
* @swap_lru: Lru list of buffer objects used for swapping.
|
||||
*/
|
||||
extern struct ttm_global {
|
||||
|
||||
/**
|
||||
* @dummy_read_page: Pointer to a dummy page used for mapping requests
|
||||
* of unpopulated pages. Constant after init.
|
||||
* Constant after init.
|
||||
*/
|
||||
|
||||
struct page *dummy_read_page;
|
||||
|
||||
/**
|
||||
* @device_list: List of buffer object devices. Protected by
|
||||
* ttm_global_mutex.
|
||||
* Protected by ttm_global_mutex.
|
||||
*/
|
||||
struct list_head device_list;
|
||||
|
||||
/**
|
||||
* @bo_count: Number of buffer objects allocated by devices.
|
||||
* Internal protection.
|
||||
*/
|
||||
atomic_t bo_count;
|
||||
} ttm_glob;
|
||||
@ -65,7 +73,7 @@ struct ttm_device_funcs {
|
||||
* ttm_tt_create
|
||||
*
|
||||
* @bo: The buffer object to create the ttm for.
|
||||
* @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
*
|
||||
* Create a struct ttm_tt to back data with system memory pages.
|
||||
* No pages are actually allocated.
|
||||
@ -222,64 +230,49 @@ struct ttm_device_funcs {
|
||||
|
||||
/**
|
||||
* struct ttm_device - Buffer object driver device-specific data.
|
||||
*
|
||||
* @device_list: Our entry in the global device list.
|
||||
* @funcs: Function table for the device.
|
||||
* @sysman: Resource manager for the system domain.
|
||||
* @man_drv: An array of resource_managers.
|
||||
* @vma_manager: Address space manager.
|
||||
* @pool: page pool for the device.
|
||||
* @dev_mapping: A pointer to the struct address_space representing the
|
||||
* device address space.
|
||||
* @wq: Work queue structure for the delayed delete workqueue.
|
||||
*/
|
||||
struct ttm_device {
|
||||
/**
|
||||
* @device_list: Our entry in the global device list.
|
||||
/*
|
||||
* Constant after bo device init
|
||||
*/
|
||||
struct list_head device_list;
|
||||
|
||||
/**
|
||||
* @funcs: Function table for the device.
|
||||
* Constant after bo device init
|
||||
*/
|
||||
struct ttm_device_funcs *funcs;
|
||||
|
||||
/**
|
||||
* @sysman: Resource manager for the system domain.
|
||||
/*
|
||||
* Access via ttm_manager_type.
|
||||
*/
|
||||
struct ttm_resource_manager sysman;
|
||||
|
||||
/**
|
||||
* @man_drv: An array of resource_managers, one per resource type.
|
||||
*/
|
||||
struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
|
||||
|
||||
/**
|
||||
* @vma_manager: Address space manager for finding BOs to mmap.
|
||||
/*
|
||||
* Protected by internal locks.
|
||||
*/
|
||||
struct drm_vma_offset_manager *vma_manager;
|
||||
|
||||
/**
|
||||
* @pool: page pool for the device.
|
||||
*/
|
||||
struct ttm_pool pool;
|
||||
|
||||
/**
|
||||
* @lru_lock: Protection for the per manager LRU and ddestroy lists.
|
||||
/*
|
||||
* Protection for the per manager LRU and ddestroy lists.
|
||||
*/
|
||||
spinlock_t lru_lock;
|
||||
|
||||
/**
|
||||
* @ddestroy: Destroyed but not yet cleaned up buffer objects.
|
||||
*/
|
||||
struct list_head ddestroy;
|
||||
|
||||
/**
|
||||
* @pinned: Buffer objects which are pinned and so not on any LRU list.
|
||||
*/
|
||||
struct list_head pinned;
|
||||
|
||||
/**
|
||||
* @dev_mapping: A pointer to the struct address_space for invalidating
|
||||
* CPU mappings on buffer move. Protected by load/unload sync.
|
||||
/*
|
||||
* Protected by load / firstopen / lastclose /unload sync.
|
||||
*/
|
||||
struct address_space *dev_mapping;
|
||||
|
||||
/**
|
||||
* @wq: Work queue structure for the delayed delete workqueue.
|
||||
/*
|
||||
* Internal protection.
|
||||
*/
|
||||
struct delayed_work wq;
|
||||
};
|
||||
@ -291,15 +284,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
|
||||
static inline struct ttm_resource_manager *
|
||||
ttm_manager_type(struct ttm_device *bdev, int mem_type)
|
||||
{
|
||||
BUILD_BUG_ON(__builtin_constant_p(mem_type)
|
||||
&& mem_type >= TTM_NUM_MEM_TYPES);
|
||||
return bdev->man_drv[mem_type];
|
||||
}
|
||||
|
||||
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
|
||||
struct ttm_resource_manager *manager)
|
||||
{
|
||||
BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
|
||||
bdev->man_drv[type] = manager;
|
||||
}
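A hedged sketch of a driver installing its own VRAM resource manager with the accessors above; ttm_resource_manager_init() and ttm_resource_manager_set_used() are assumed from ttm_resource.h, and the manager's func table setup is omitted for brevity.

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

static void my_register_vram(struct ttm_device *bdev,
			     struct ttm_resource_manager *my_vram_man,
			     unsigned long vram_pages)
{
	ttm_resource_manager_init(my_vram_man, vram_pages);
	/* Make TTM route TTM_PL_VRAM allocations through this manager. */
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, my_vram_man);
	ttm_resource_manager_set_used(my_vram_man, true);
}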
|
||||
|
||||
@ -308,6 +298,5 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
|
||||
struct drm_vma_offset_manager *vma_manager,
|
||||
bool use_dma_alloc, bool use_dma32);
|
||||
void ttm_device_fini(struct ttm_device *bdev);
|
||||
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
|
||||
|
||||
#endif
|
||||
|
@ -35,17 +35,6 @@
|
||||
|
||||
/*
|
||||
* Memory regions for data placement.
|
||||
*
|
||||
* Buffers placed in TTM_PL_SYSTEM are considered under TTMs control and can
|
||||
* be swapped out whenever TTMs thinks it is a good idea.
|
||||
* In cases where drivers would like to use TTM_PL_SYSTEM as a valid
|
||||
* placement they need to be able to handle the issues that arise due to the
|
||||
* above manually.
|
||||
*
|
||||
* For BO's which reside in system memory but for which the accelerator
|
||||
* requires direct access (i.e. their usage needs to be synchronized
|
||||
* between the CPU and accelerator via fences) a new, driver private
|
||||
* placement that can handle such scenarios is a good idea.
|
||||
*/
|
||||
|
||||
#define TTM_PL_SYSTEM 0
|
||||
@ -69,7 +58,6 @@
|
||||
*
|
||||
* @fpfn: first valid page frame number to put the object
|
||||
* @lpfn: last valid page frame number to put the object
|
||||
* @mem_type: One of TTM_PL_* where the resource should be allocated from.
|
||||
* @flags: memory domain and caching flags for the object
|
||||
*
|
||||
* Structure indicating a possible place to put an object.
|
||||
|
@ -37,7 +37,7 @@ struct ttm_pool;
|
||||
struct ttm_operation_ctx;
|
||||
|
||||
/**
|
||||
* struct ttm_pool_type - Pool for a certain memory type
|
||||
* ttm_pool_type - Pool for a certain memory type
|
||||
*
|
||||
* @pool: the pool we belong to, might be NULL for the global ones
|
||||
* @order: the allocation order our pages have
|
||||
@ -58,9 +58,8 @@ struct ttm_pool_type {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ttm_pool - Pool for all caching and orders
|
||||
* ttm_pool - Pool for all caching and orders
|
||||
*
|
||||
* @dev: the device we allocate pages for
|
||||
* @use_dma_alloc: if coherent DMA allocations should be used
|
||||
* @use_dma32: if GFP_DMA32 should be used
|
||||
* @caching: pools for each caching/order
|
||||
|
@ -4,7 +4,6 @@
|
||||
#define _TTM_RANGE_MANAGER_H_
|
||||
|
||||
#include <drm/ttm/ttm_resource.h>
|
||||
#include <drm/ttm/ttm_device.h>
|
||||
#include <drm/drm_mm.h>
|
||||
|
||||
/**
|
||||
@ -34,23 +33,10 @@ to_ttm_range_mgr_node(struct ttm_resource *res)
|
||||
return container_of(res, struct ttm_range_mgr_node, base);
|
||||
}
|
||||
|
||||
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
|
||||
int ttm_range_man_init(struct ttm_device *bdev,
|
||||
unsigned type, bool use_tt,
|
||||
unsigned long p_size);
|
||||
int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
|
||||
int ttm_range_man_fini(struct ttm_device *bdev,
|
||||
unsigned type);
|
||||
static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
|
||||
unsigned int type, bool use_tt,
|
||||
unsigned long p_size)
|
||||
{
|
||||
BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
|
||||
return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
|
||||
}
|
||||
|
||||
static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
|
||||
unsigned int type)
|
||||
{
|
||||
BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
|
||||
return ttm_range_man_fini_nocheck(bdev, type);
|
||||
}
|
||||
#endif
|
||||
|
@ -40,7 +40,6 @@ struct ttm_resource_manager;
|
||||
struct ttm_resource;
|
||||
struct ttm_place;
|
||||
struct ttm_buffer_object;
|
||||
struct ttm_placement;
|
||||
struct dma_buf_map;
|
||||
struct io_mapping;
|
||||
struct sg_table;
|
||||
@ -103,7 +102,10 @@ struct ttm_resource_manager_func {
|
||||
* struct ttm_resource_manager
|
||||
*
|
||||
* @use_type: The memory type is enabled.
|
||||
* @use_tt: If a TT object should be used for the backing store.
|
||||
* @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
|
||||
* managed by this memory type.
|
||||
* @gpu_offset: If used, the GPU offset of the first managed page of
|
||||
* fixed memory or the first managed location in an aperture.
|
||||
* @size: Size of the managed region.
|
||||
* @func: structure pointer implementing the range manager. See above
|
||||
* @move_lock: lock for move fence
|
||||
@ -141,7 +143,6 @@ struct ttm_resource_manager {
|
||||
* @addr: mapped virtual address
|
||||
* @offset: physical addr
|
||||
* @is_iomem: is this io memory ?
|
||||
* @caching: See enum ttm_caching
|
||||
*
|
||||
* Structure indicating the bus placement of an object.
|
||||
*/
|
||||
@ -265,8 +266,6 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
|
||||
const struct ttm_place *place,
|
||||
struct ttm_resource **res);
|
||||
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
|
||||
bool ttm_resource_compat(struct ttm_resource *res,
|
||||
struct ttm_placement *placement);
|
||||
|
||||
void ttm_resource_manager_init(struct ttm_resource_manager *man,
|
||||
unsigned long p_size);
|
||||
|
@ -38,70 +38,36 @@ struct ttm_resource;
|
||||
struct ttm_buffer_object;
|
||||
struct ttm_operation_ctx;
|
||||
|
||||
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
|
||||
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
|
||||
#define TTM_PAGE_FLAG_SG (1 << 8)
|
||||
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
|
||||
|
||||
#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
|
||||
|
||||
/**
|
||||
* struct ttm_tt - This is a structure holding the pages, caching- and aperture
|
||||
* binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
|
||||
* struct ttm_tt
|
||||
*
|
||||
* @pages: Array of pages backing the data.
|
||||
* @page_flags: see TTM_PAGE_FLAG_*
|
||||
* @num_pages: Number of pages in the page array.
|
||||
* @sg: for SG objects via dma-buf
|
||||
* @dma_address: The DMA (bus) addresses of the pages
|
||||
* @swap_storage: Pointer to shmem struct file for swap storage.
|
||||
* @pages_list: used by some page allocation backend
|
||||
* @caching: The current caching state of the pages.
|
||||
*
|
||||
* This is a structure holding the pages, caching- and aperture binding
|
||||
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
|
||||
* memory.
|
||||
*/
|
||||
struct ttm_tt {
|
||||
/** @pages: Array of pages backing the data. */
|
||||
struct page **pages;
|
||||
/**
|
||||
* @page_flags: The page flags.
|
||||
*
|
||||
* Supported values:
|
||||
*
|
||||
* TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
|
||||
* and swapped out by TTM. Calling ttm_tt_populate() will then swap the
|
||||
* pages back in, and unset the flag. Drivers should in general never
|
||||
* need to touch this.
|
||||
*
|
||||
* TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
|
||||
* allocation.
|
||||
*
|
||||
* TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
|
||||
* externally, like with dma-buf or userptr. This effectively disables
|
||||
* TTM swapping out such pages. Also important is to prevent TTM from
|
||||
* ever directly mapping these pages.
|
||||
*
|
||||
* Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
|
||||
* this flag.
|
||||
*
|
||||
* TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
|
||||
* TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
|
||||
* still valid to use TTM to map the pages directly. This is useful when
|
||||
* implementing a ttm_tt backend which still allocates driver owned
|
||||
 * pages underneath (say with shmem).
|
||||
*
|
||||
* Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
|
||||
* here should always be:
|
||||
*
|
||||
* page_flags = TTM_TT_FLAG_EXTERNAL |
|
||||
* TTM_TT_FLAG_EXTERNAL_MAPPABLE;
|
||||
*
|
||||
* TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
|
||||
* set by TTM after ttm_tt_populate() has successfully returned, and is
|
||||
* then unset when TTM calls ttm_tt_unpopulate().
|
||||
*/
|
||||
#define TTM_TT_FLAG_SWAPPED (1 << 0)
|
||||
#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
|
||||
#define TTM_TT_FLAG_EXTERNAL (1 << 2)
|
||||
#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
|
||||
|
||||
#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
|
||||
uint32_t page_flags;
|
||||
/** @num_pages: Number of pages in the page array. */
|
||||
uint32_t num_pages;
|
||||
/** @sg: for SG objects via dma-buf. */
|
||||
struct sg_table *sg;
|
||||
/** @dma_address: The DMA (bus) addresses of the pages. */
|
||||
dma_addr_t *dma_address;
|
||||
/** @swap_storage: Pointer to shmem struct file for swap storage. */
|
||||
struct file *swap_storage;
|
||||
/**
|
||||
* @caching: The current caching state of the pages, see enum
|
||||
* ttm_caching.
|
||||
*/
|
||||
enum ttm_caching caching;
|
||||
};
|
||||
|
||||
@ -119,7 +85,7 @@ struct ttm_kmap_iter_tt {
|
||||
|
||||
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
|
||||
{
|
||||
return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
|
||||
return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -138,7 +104,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
|
||||
*
|
||||
* @ttm: The struct ttm_tt.
|
||||
* @bo: The buffer object we create the ttm for.
|
||||
* @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
* @caching: the desired caching state of the pages
|
||||
*
|
||||
* Create a struct ttm_tt to back data with system memory pages.
|
||||
@ -161,15 +127,21 @@ int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
|
||||
void ttm_tt_fini(struct ttm_tt *ttm);
|
||||
|
||||
/**
|
||||
* ttm_tt_destroy:
|
||||
* ttm_ttm_destroy:
|
||||
*
|
||||
* @bdev: the ttm_device this object belongs to
|
||||
* @ttm: The struct ttm_tt.
|
||||
*
|
||||
* Unbind, unpopulate and destroy common struct ttm_tt.
|
||||
*/
|
||||
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
|
||||
|
||||
/**
|
||||
* ttm_tt_destroy_common:
|
||||
*
|
||||
* Called from driver to destroy common path.
|
||||
*/
|
||||
void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
|
||||
|
||||
/**
|
||||
* ttm_tt_swapin:
|
||||
*
|
||||
@ -184,19 +156,15 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
|
||||
/**
|
||||
* ttm_tt_populate - allocate pages for a ttm
|
||||
*
|
||||
* @bdev: the ttm_device this object belongs to
|
||||
* @ttm: Pointer to the ttm_tt structure
|
||||
* @ctx: operation context for populating the tt object.
|
||||
*
|
||||
* Calls the driver method to allocate pages for a ttm
|
||||
*/
|
||||
int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
|
||||
struct ttm_operation_ctx *ctx);
|
||||
int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
|
||||
|
||||
/**
|
||||
* ttm_tt_unpopulate - free pages from a ttm
|
||||
*
|
||||
* @bdev: the ttm_device this object belongs to
|
||||
* @ttm: Pointer to the ttm_tt structure
|
||||
*
|
||||
* Calls the driver method to free all pages from a ttm
|
||||
@ -213,7 +181,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
|
||||
*/
|
||||
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
|
||||
{
|
||||
ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
|
||||
ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
|
||||
}
|
||||
|
||||
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
|
||||
@ -229,7 +197,7 @@ struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
|
||||
*
|
||||
* @bo: Buffer object we allocate the ttm for.
|
||||
* @bridge: The agp bridge this device is sitting on.
|
||||
* @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
|
||||
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
|
||||
*
|
||||
*
|
||||
* Create a TTM backend that uses the indicated AGP bridge as an aperture
|
||||
|
@ -158,7 +158,6 @@
|
||||
#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
|
||||
#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
|
||||
#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
|
||||
#define AM4_L3S_ADC1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x230)
|
||||
#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
|
||||
#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
|
||||
#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
|
||||
|
@ -84,10 +84,17 @@
|
||||
#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
|
||||
/* iva clocks */
|
||||
#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
|
||||
/* dss clocks */
|
||||
#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
|
||||
|
||||
/* gpu clocks */
|
||||
#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
|
||||
/* l3init clocks */
|
||||
#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
|
||||
@ -260,17 +267,10 @@
|
||||
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
|
||||
/* iva clocks */
|
||||
#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
|
||||
/* dss clocks */
|
||||
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
|
||||
|
||||
/* gpu clocks */
|
||||
#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
|
||||
|
||||
/* l3init clocks */
|
||||
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
|
||||
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
|
||||
|
@ -209,7 +209,6 @@
|
||||
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
|
||||
#define CLK_MOUT_HDMI 396
|
||||
#define CLK_MOUT_MIXER 397
|
||||
#define CLK_MOUT_VPLLSRC 398
|
||||
|
||||
/* gate clocks - ppmu */
|
||||
#define CLK_PPMULEFT 400
|
||||
@ -237,10 +236,9 @@
|
||||
#define CLK_DIV_C2C 458 /* Exynos4x12 only */
|
||||
#define CLK_DIV_GDL 459
|
||||
#define CLK_DIV_GDR 460
|
||||
#define CLK_DIV_CORE2 461
|
||||
|
||||
/* must be greater than maximal clock id */
|
||||
#define CLK_NR_CLKS 462
|
||||
#define CLK_NR_CLKS 461
|
||||
|
||||
/* Exynos4x12 ISP clocks */
|
||||
#define CLK_ISP_FIMC_ISP 1
|
||||
|
@ -19,7 +19,6 @@
|
||||
#define CLK_FOUT_EPLL 7
|
||||
#define CLK_FOUT_VPLL 8
|
||||
#define CLK_ARM_CLK 9
|
||||
#define CLK_DIV_ARM2 10
|
||||
|
||||
/* gate for special clocks (sclk) */
|
||||
#define CLK_SCLK_CAM_BAYER 128
|
||||
@ -175,9 +174,8 @@
|
||||
#define CLK_MOUT_ACLK300_DISP1_SUB 1027
|
||||
#define CLK_MOUT_APLL 1028
|
||||
#define CLK_MOUT_MPLL 1029
|
||||
#define CLK_MOUT_VPLLSRC 1030
|
||||
|
||||
/* must be greater than maximal clock id */
|
||||
#define CLK_NR_CLKS 1031
|
||||
#define CLK_NR_CLKS 1030
|
||||
|
||||
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
|
||||
|
@ -117,6 +117,7 @@
|
||||
#define IMX8MP_CLK_AUDIO_AHB 108
|
||||
#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
|
||||
#define IMX8MP_CLK_IPG_ROOT 110
|
||||
#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
|
||||
#define IMX8MP_CLK_DRAM_ALT 112
|
||||
#define IMX8MP_CLK_DRAM_APB 113
|
||||
#define IMX8MP_CLK_VPU_G1 114
|
||||
|

@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header provides clock numbers for the ingenic,jz4740-cgu DT binding.
 *

@ -33,5 +34,6 @@
#define JZ4740_CLK_ADC 19
#define JZ4740_CLK_I2C 20
#define JZ4740_CLK_AIC 21
#define JZ4740_CLK_TCU 22

#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */

@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header provides clock numbers for the ingenic,jz4780-cgu DT binding.
 *

@ -11,78 +12,80 @@
#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__

#define JZ4780_CLK_EXCLK 0
#define JZ4780_CLK_RTCLK 1
#define JZ4780_CLK_APLL 2
#define JZ4780_CLK_MPLL 3
#define JZ4780_CLK_EPLL 4
#define JZ4780_CLK_VPLL 5
#define JZ4780_CLK_OTGPHY 6
#define JZ4780_CLK_SCLKA 7
#define JZ4780_CLK_CPUMUX 8
#define JZ4780_CLK_CPU 9
#define JZ4780_CLK_L2CACHE 10
#define JZ4780_CLK_AHB0 11
#define JZ4780_CLK_AHB2PMUX 12
#define JZ4780_CLK_AHB2 13
#define JZ4780_CLK_PCLK 14
#define JZ4780_CLK_DDR 15
#define JZ4780_CLK_VPU 16
#define JZ4780_CLK_I2SPLL 17
#define JZ4780_CLK_I2S 18
#define JZ4780_CLK_LCD0PIXCLK 19
#define JZ4780_CLK_LCD1PIXCLK 20
#define JZ4780_CLK_MSCMUX 21
#define JZ4780_CLK_MSC0 22
#define JZ4780_CLK_MSC1 23
#define JZ4780_CLK_MSC2 24
#define JZ4780_CLK_UHC 25
#define JZ4780_CLK_SSIPLL 26
#define JZ4780_CLK_SSI 27
#define JZ4780_CLK_CIMMCLK 28
#define JZ4780_CLK_PCMPLL 29
#define JZ4780_CLK_PCM 30
#define JZ4780_CLK_GPU 31
#define JZ4780_CLK_HDMI 32
#define JZ4780_CLK_BCH 33
#define JZ4780_CLK_NEMC 34
#define JZ4780_CLK_OTG0 35
#define JZ4780_CLK_SSI0 36
#define JZ4780_CLK_SMB0 37
#define JZ4780_CLK_SMB1 38
#define JZ4780_CLK_SCC 39
#define JZ4780_CLK_AIC 40
#define JZ4780_CLK_TSSI0 41
#define JZ4780_CLK_OWI 42
#define JZ4780_CLK_KBC 43
#define JZ4780_CLK_SADC 44
#define JZ4780_CLK_UART0 45
#define JZ4780_CLK_UART1 46
#define JZ4780_CLK_UART2 47
#define JZ4780_CLK_UART3 48
#define JZ4780_CLK_SSI1 49
#define JZ4780_CLK_SSI2 50
#define JZ4780_CLK_PDMA 51
#define JZ4780_CLK_GPS 52
#define JZ4780_CLK_MAC 53
#define JZ4780_CLK_SMB2 54
#define JZ4780_CLK_CIM 55
#define JZ4780_CLK_LCD 56
#define JZ4780_CLK_TVE 57
#define JZ4780_CLK_IPU 58
#define JZ4780_CLK_DDR0 59
#define JZ4780_CLK_DDR1 60
#define JZ4780_CLK_SMB3 61
#define JZ4780_CLK_TSSI1 62
#define JZ4780_CLK_COMPRESS 63
#define JZ4780_CLK_AIC1 64
#define JZ4780_CLK_GPVLC 65
#define JZ4780_CLK_OTG1 66
#define JZ4780_CLK_UART4 67
#define JZ4780_CLK_AHBMON 68
#define JZ4780_CLK_SMB4 69
#define JZ4780_CLK_DES 70
#define JZ4780_CLK_X2D 71
#define JZ4780_CLK_CORE1 72
#define JZ4780_CLK_EXCLK_DIV512 73
#define JZ4780_CLK_RTC 74

#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */

@ -105,16 +105,6 @@
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
#define CLKID_HDMI_PLL_HDMI_OUT 132
#define CLKID_VID_PLL_FINAL_DIV 137
#define CLKID_VCLK_IN_SEL 138
#define CLKID_VCLK2_IN_SEL 149
#define CLKID_CTS_ENCT 161
#define CLKID_CTS_ENCP 163
#define CLKID_CTS_ENCI 165
#define CLKID_HDMI_TX_PIXEL 167
#define CLKID_CTS_ENCL 169
#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS 174
#define CLKID_VPU 190
#define CLKID_VDEC_1 196

@ -148,18 +148,6 @@
#define GCC_USB30_SLEEP_CLK 138
#define GCC_USB_HS_AHB_CLK 139
#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140
#define CONFIG_NOC_CLK_SRC 141
#define PERIPH_NOC_CLK_SRC 142
#define SYSTEM_NOC_CLK_SRC 143
#define GPLL0_OUT_MMSSCC 144
#define GPLL0_OUT_MSSCC 145
#define PCIE_0_PHY_LDO 146
#define PCIE_1_PHY_LDO 147
#define UFS_PHY_LDO 148
#define USB_SS_PHY_LDO 149
#define GCC_BOOT_ROM_AHB_CLK 150
#define GCC_PRNG_AHB_CLK 151
#define GCC_USB3_PHY_PIPE_CLK 152

/* GDSCs */
#define PCIE_GDSC 0

@ -174,6 +162,5 @@
#define PCIE_PHY_0_RESET 2
#define PCIE_PHY_1_RESET 3
#define QUSB2_PHY_RESET 4
#define MSS_RESET 5

#endif

@ -159,11 +159,5 @@
#define RPM_SMD_SNOC_PERIPH_A_CLK 113
#define RPM_SMD_SNOC_LPASS_CLK 114
#define RPM_SMD_SNOC_LPASS_A_CLK 115
#define RPM_SMD_HWKM_CLK 116
#define RPM_SMD_HWKM_A_CLK 117
#define RPM_SMD_PKA_CLK 118
#define RPM_SMD_PKA_A_CLK 119
#define RPM_SMD_CPUSS_GNOC_CLK 120
#define RPM_SMD_CPUSS_GNOC_A_CLK 121

#endif

@ -113,7 +113,7 @@
#define CLK_USB_OHCI0 91

#define CLK_USB_OHCI1 93
#define CLK_DRAM 94

#define CLK_DRAM_VE 95
#define CLK_DRAM_CSI 96
#define CLK_DRAM_DEINTERLACE 97

@ -126,7 +126,7 @@
#define CLK_USB_OHCI1 93
#define CLK_USB_OHCI2 94
#define CLK_USB_OHCI3 95
#define CLK_DRAM 96

#define CLK_DRAM_VE 97
#define CLK_DRAM_CSI 98
#define CLK_DRAM_DEINTERLACE 99

@ -4,31 +4,11 @@
#ifndef DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
#define DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H

/**
 * @file
 * @defgroup bpmp_clock_ids Clock ID's
 * @{
 */
/**
 * @brief controls the EMC clock frequency.
 * @details Doing a clk_set_rate on this clock will select the
 * appropriate clock source, program the source rate and execute a
 * specific sequence to switch to the new clock source for both memory
 * controllers. This can be used to control the balance between memory
 * throughput and memory controller power.
 */
#define TEGRA234_CLK_EMC 31U
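The @details comment above is the whole contract for this id: a consumer just asks for a rate and the BPMP firmware performs the source-switch sequence. A minimal, hypothetical consumer sketch (the clock-name "emc" and the <&bpmp TEGRA234_CLK_EMC> device-tree wiring are assumptions):

```c
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/*
 * Hypothetical consumer: look up the EMC clock referenced via
 * <&bpmp TEGRA234_CLK_EMC> in the device tree and request a new DRAM
 * frequency; the firmware handles the switch sequence described above.
 */
static int emc_set_dram_rate(struct platform_device *pdev, unsigned long hz)
{
	struct clk *emc = devm_clk_get(&pdev->dev, "emc");

	if (IS_ERR(emc))
		return PTR_ERR(emc);

	return clk_set_rate(emc, hz);
}
```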

/** @brief output of gate CLK_ENB_FUSE */
#define TEGRA234_CLK_FUSE 40U
#define TEGRA234_CLK_FUSE 40
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
#define TEGRA234_CLK_SDMMC4 123U
#define TEGRA234_CLK_SDMMC4 123
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
#define TEGRA234_CLK_UARTA 155U
/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */
#define TEGRA234_CLK_SDMMC_LEGACY_TM 219U
/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
#define TEGRA234_CLK_PLLC4 237U
/** @brief 32K input clock provided by PMIC */
#define TEGRA234_CLK_CLK_32K 289U
#define TEGRA234_CLK_UARTA 155

#endif

@ -50,75 +50,4 @@
#define MSC313_GPIO_SPI0_DI (MSC313_GPIO_SPI0 + 2)
#define MSC313_GPIO_SPI0_DO (MSC313_GPIO_SPI0 + 3)

/* SSD20x */
#define SSD20XD_GPIO_FUART 0
#define SSD20XD_GPIO_FUART_RX (SSD20XD_GPIO_FUART + 0)
#define SSD20XD_GPIO_FUART_TX (SSD20XD_GPIO_FUART + 1)
#define SSD20XD_GPIO_FUART_CTS (SSD20XD_GPIO_FUART + 2)
#define SSD20XD_GPIO_FUART_RTS (SSD20XD_GPIO_FUART + 3)

#define SSD20XD_GPIO_SD (SSD20XD_GPIO_FUART_RTS + 1)
#define SSD20XD_GPIO_SD_CLK (SSD20XD_GPIO_SD + 0)
#define SSD20XD_GPIO_SD_CMD (SSD20XD_GPIO_SD + 1)
#define SSD20XD_GPIO_SD_D0 (SSD20XD_GPIO_SD + 2)
#define SSD20XD_GPIO_SD_D1 (SSD20XD_GPIO_SD + 3)
#define SSD20XD_GPIO_SD_D2 (SSD20XD_GPIO_SD + 4)
#define SSD20XD_GPIO_SD_D3 (SSD20XD_GPIO_SD + 5)

#define SSD20XD_GPIO_UART0 (SSD20XD_GPIO_SD_D3 + 1)
#define SSD20XD_GPIO_UART0_RX (SSD20XD_GPIO_UART0 + 0)
#define SSD20XD_GPIO_UART0_TX (SSD20XD_GPIO_UART0 + 1)

#define SSD20XD_GPIO_UART1 (SSD20XD_GPIO_UART0_TX + 1)
#define SSD20XD_GPIO_UART1_RX (SSD20XD_GPIO_UART1 + 0)
#define SSD20XD_GPIO_UART1_TX (SSD20XD_GPIO_UART1 + 1)

#define SSD20XD_GPIO_TTL (SSD20XD_GPIO_UART1_TX + 1)
#define SSD20XD_GPIO_TTL0 (SSD20XD_GPIO_TTL + 0)
#define SSD20XD_GPIO_TTL1 (SSD20XD_GPIO_TTL + 1)
#define SSD20XD_GPIO_TTL2 (SSD20XD_GPIO_TTL + 2)
#define SSD20XD_GPIO_TTL3 (SSD20XD_GPIO_TTL + 3)
#define SSD20XD_GPIO_TTL4 (SSD20XD_GPIO_TTL + 4)
#define SSD20XD_GPIO_TTL5 (SSD20XD_GPIO_TTL + 5)
#define SSD20XD_GPIO_TTL6 (SSD20XD_GPIO_TTL + 6)
#define SSD20XD_GPIO_TTL7 (SSD20XD_GPIO_TTL + 7)
#define SSD20XD_GPIO_TTL8 (SSD20XD_GPIO_TTL + 8)
#define SSD20XD_GPIO_TTL9 (SSD20XD_GPIO_TTL + 9)
#define SSD20XD_GPIO_TTL10 (SSD20XD_GPIO_TTL + 10)
#define SSD20XD_GPIO_TTL11 (SSD20XD_GPIO_TTL + 11)
#define SSD20XD_GPIO_TTL12 (SSD20XD_GPIO_TTL + 12)
#define SSD20XD_GPIO_TTL13 (SSD20XD_GPIO_TTL + 13)
#define SSD20XD_GPIO_TTL14 (SSD20XD_GPIO_TTL + 14)
#define SSD20XD_GPIO_TTL15 (SSD20XD_GPIO_TTL + 15)
#define SSD20XD_GPIO_TTL16 (SSD20XD_GPIO_TTL + 16)
#define SSD20XD_GPIO_TTL17 (SSD20XD_GPIO_TTL + 17)
#define SSD20XD_GPIO_TTL18 (SSD20XD_GPIO_TTL + 18)
#define SSD20XD_GPIO_TTL19 (SSD20XD_GPIO_TTL + 19)
#define SSD20XD_GPIO_TTL20 (SSD20XD_GPIO_TTL + 20)
#define SSD20XD_GPIO_TTL21 (SSD20XD_GPIO_TTL + 21)
#define SSD20XD_GPIO_TTL22 (SSD20XD_GPIO_TTL + 22)
#define SSD20XD_GPIO_TTL23 (SSD20XD_GPIO_TTL + 23)
#define SSD20XD_GPIO_TTL24 (SSD20XD_GPIO_TTL + 24)
#define SSD20XD_GPIO_TTL25 (SSD20XD_GPIO_TTL + 25)
#define SSD20XD_GPIO_TTL26 (SSD20XD_GPIO_TTL + 26)
#define SSD20XD_GPIO_TTL27 (SSD20XD_GPIO_TTL + 27)

#define SSD20XD_GPIO_GPIO (SSD20XD_GPIO_TTL27 + 1)
#define SSD20XD_GPIO_GPIO0 (SSD20XD_GPIO_GPIO + 0)
#define SSD20XD_GPIO_GPIO1 (SSD20XD_GPIO_GPIO + 1)
#define SSD20XD_GPIO_GPIO2 (SSD20XD_GPIO_GPIO + 2)
#define SSD20XD_GPIO_GPIO3 (SSD20XD_GPIO_GPIO + 3)
#define SSD20XD_GPIO_GPIO4 (SSD20XD_GPIO_GPIO + 4)
#define SSD20XD_GPIO_GPIO5 (SSD20XD_GPIO_GPIO + 5)
#define SSD20XD_GPIO_GPIO6 (SSD20XD_GPIO_GPIO + 6)
#define SSD20XD_GPIO_GPIO7 (SSD20XD_GPIO_GPIO + 7)
#define SSD20XD_GPIO_GPIO10 (SSD20XD_GPIO_GPIO + 8)
#define SSD20XD_GPIO_GPIO11 (SSD20XD_GPIO_GPIO + 9)
#define SSD20XD_GPIO_GPIO12 (SSD20XD_GPIO_GPIO + 10)
#define SSD20XD_GPIO_GPIO13 (SSD20XD_GPIO_GPIO + 11)
#define SSD20XD_GPIO_GPIO14 (SSD20XD_GPIO_GPIO + 12)
#define SSD20XD_GPIO_GPIO85 (SSD20XD_GPIO_GPIO + 13)
#define SSD20XD_GPIO_GPIO86 (SSD20XD_GPIO_GPIO + 14)
#define SSD20XD_GPIO_GPIO90 (SSD20XD_GPIO_GPIO + 15)

#endif /* _DT_BINDINGS_MSC313_GPIO_H */

@ -60,13 +60,6 @@
#define LED_FUNCTION_MICMUTE "micmute"
#define LED_FUNCTION_MUTE "mute"

/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */
#define LED_FUNCTION_PLAYER1 "player-1"
#define LED_FUNCTION_PLAYER2 "player-2"
#define LED_FUNCTION_PLAYER3 "player-3"
#define LED_FUNCTION_PLAYER4 "player-4"
#define LED_FUNCTION_PLAYER5 "player-5"
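These function names are plain strings that end up in the conventional "devicename:color:function" LED name. A tiny illustrative sketch (hypothetical device and color, not the LED core's own naming code; the macro value is copied from the header above):

```c
#include <stdio.h>

#define LED_FUNCTION_PLAYER1 "player-1"	/* value as defined in the header above */

/* Compose the conventional "devicename:color:function" LED name. */
int main(void)
{
	char name[64];

	snprintf(name, sizeof(name), "%s:%s:%s",
		 "gamepad0", "white", LED_FUNCTION_PLAYER1);
	printf("%s\n", name);	/* prints: gamepad0:white:player-1 */
	return 0;
}
```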

/* Miscelleaus functions. Use functions above if you can. */
#define LED_FUNCTION_ACTIVITY "activity"
#define LED_FUNCTION_ALARM "alarm"

@ -8,7 +8,6 @@

/* Signal IDs for MPROC protocol */
#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
#define IPCC_MPROC_SIGNAL_TZ 1
#define IPCC_MPROC_SIGNAL_SMP2P 2
#define IPCC_MPROC_SIGNAL_PING 3

@ -30,7 +29,6 @@
#define IPCC_CLIENT_PCIE1 14
#define IPCC_CLIENT_PCIE2 15
#define IPCC_CLIENT_SPSS 16
#define IPCC_CLIENT_TME 23
#define IPCC_CLIENT_WPSS 24

#endif

@ -95,26 +95,4 @@
#define AM64_SERDES0_LANE0_PCIE0 0x0
#define AM64_SERDES0_LANE0_USB 0x1

/* J721S2 */

#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3

#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
#define J721S2_SERDES0_LANE1_USB 0x2
#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3

#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3

#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
#define J721S2_SERDES0_LANE3_USB 0x2
#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3

#endif /* _DT_BINDINGS_MUX_TI_SERDES */

@ -6,18 +6,15 @@
#ifndef _DT_BINDINGS_CADENCE_SERDES_H
#define _DT_BINDINGS_CADENCE_SERDES_H

#define CDNS_SERDES_NO_SSC 0
#define CDNS_SERDES_EXTERNAL_SSC 1
#define CDNS_SERDES_INTERNAL_SSC 2

/* Torrent */
#define TORRENT_SERDES_NO_SSC 0
#define TORRENT_SERDES_EXTERNAL_SSC 1
#define TORRENT_SERDES_INTERNAL_SSC 2

#define CDNS_TORRENT_REFCLK_DRIVER 0
#define CDNS_TORRENT_DERIVED_REFCLK 1
#define CDNS_TORRENT_RECEIVED_REFCLK 2

/* Sierra */
#define CDNS_SIERRA_PLL_CMNLC 0
#define CDNS_SIERRA_PLL_CMNLC1 1
#define CDNS_SIERRA_DERIVED_REFCLK 2

#endif /* _DT_BINDINGS_CADENCE_SERDES_H */

@ -38,7 +38,4 @@
#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))

#define J721S2_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define J721S2_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))

#endif
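Each *_IOPAD() macro above expands to two device-tree cells: the pad register offset masked to 13 bits, then the pin value OR'ed with the mux mode. A small stand-alone sketch of that arithmetic (raw example numbers, not the kernel's PIN_* flag macros; the helper names are hypothetical):

```c
#include <stdio.h>

/* Restate the two cells that AM64X_IOPAD(pa, val, muxmode) produces. */
static unsigned int iopad_offset(unsigned int pa)
{
	return pa & 0x1fff;	/* pad register offset, 13 bits */
}

static unsigned int iopad_value(unsigned int val, unsigned int muxmode)
{
	return val | muxmode;	/* pin configuration value plus mux mode */
}

int main(void)
{
	/* e.g. pad offset 0x0254, config value 0x40000, mux mode 7 */
	printf("0x%04x 0x%05x\n", iopad_offset(0x0254), iopad_value(0x40000, 7));
	return 0;	/* prints: 0x0254 0x40007 */
}
```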

@ -16,15 +16,6 @@
#define MTK_PUPD_SET_R1R0_10 102
#define MTK_PUPD_SET_R1R0_11 103

#define MTK_PULL_SET_RSEL_000 200
#define MTK_PULL_SET_RSEL_001 201
#define MTK_PULL_SET_RSEL_010 202
#define MTK_PULL_SET_RSEL_011 203
#define MTK_PULL_SET_RSEL_100 204
#define MTK_PULL_SET_RSEL_101 205
#define MTK_PULL_SET_RSEL_110 206
#define MTK_PULL_SET_RSEL_111 207

#define MTK_DRIVE_2mA 2
#define MTK_DRIVE_4mA 4
#define MTK_DRIVE_6mA 6

@ -36,10 +36,7 @@
#define EXYNOS5260_PIN_DRV_LV4 2
#define EXYNOS5260_PIN_DRV_LV6 3

/*
 * Drive strengths for Exynos5410, Exynos542x, Exynos5800 and Exynos850 (except
 * GPIO_HSI block)
 */
/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
#define EXYNOS5420_PIN_DRV_LV1 0
#define EXYNOS5420_PIN_DRV_LV2 1
#define EXYNOS5420_PIN_DRV_LV3 2

@ -59,14 +56,6 @@
#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc
#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf

/* Drive strengths for Exynos850 GPIO_HSI block */
#define EXYNOS850_HSI_PIN_DRV_LV1 0 /* 1x */
#define EXYNOS850_HSI_PIN_DRV_LV1_5 1 /* 1.5x */
#define EXYNOS850_HSI_PIN_DRV_LV2 2 /* 2x */
#define EXYNOS850_HSI_PIN_DRV_LV2_5 3 /* 2.5x */
#define EXYNOS850_HSI_PIN_DRV_LV3 4 /* 3x */
#define EXYNOS850_HSI_PIN_DRV_LV4 5 /* 4x */
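The 1x–4x comments are the only meaning those field values carry. A tiny lookup sketch of that mapping (hypothetical table, multipliers kept in tenths so they stay integers):

```c
#include <stdio.h>

/*
 * Map the EXYNOS850_HSI_PIN_DRV_LV* register values (0..5) to the
 * drive-strength multipliers given in the comments above, in tenths:
 * 10 = 1x, 15 = 1.5x, 20 = 2x, 25 = 2.5x, 30 = 3x, 40 = 4x.
 */
static const unsigned int exynos850_hsi_drv_tenths[] = { 10, 15, 20, 25, 30, 40 };

int main(void)
{
	unsigned int lv;

	for (lv = 0; lv < 6; lv++)
		printf("field value %u -> %u.%ux drive\n", lv,
		       exynos850_hsi_drv_tenths[lv] / 10,
		       exynos850_hsi_drv_tenths[lv] % 10);
	return 0;
}
```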

#define EXYNOS_PIN_FUNC_INPUT 0
#define EXYNOS_PIN_FUNC_OUTPUT 1
#define EXYNOS_PIN_FUNC_2 2