Mirror of https://github.com/Qortal/Brooklyn.git (synced 2025-02-14 11:15:54 +00:00)

Commit d0034bac99 ("phase 6"), parent 4bdaa608f6.
@@ -721,7 +721,7 @@ config XILINX_DMA

 config XILINX_ZYNQMP_DMA
 	tristate "Xilinx ZynqMP DMA Engine"
-	depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+	depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST
 	select DMA_ENGINE
 	help
 	  Enable support for Xilinx ZynqMP DMA controller.
@@ -585,16 +585,14 @@ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
 	struct msgdma_sw_desc *desc, *next;

 	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+		struct dmaengine_desc_callback cb;

 		list_del(&desc->node);

-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
 			spin_unlock(&mdev->lock);
-			callback(callback_param);
+			dmaengine_desc_callback_invoke(&cb, NULL);
 			spin_lock(&mdev->lock);
 		}

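Note on the hunk above: the driver stops open-coding callback invocation and switches to the dmaengine descriptor-callback helpers, which also cover the newer result-reporting callback variant. A condensed sketch of those helpers (simplified from mainline drivers/dma/dmaengine.h; treat the exact field layout as an assumption):

/* Condensed sketch of the helpers used above. */
struct dmaengine_desc_callback {
	dma_async_tx_callback callback;			/* legacy callback */
	dma_async_tx_callback_result callback_result;	/* result-aware variant */
	void *callback_param;
};

/* Snapshot the callback fields while the channel lock is still held... */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/* ...so the invocation can safely happen after the lock is dropped. */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback || cb->callback_result;
}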
@@ -731,7 +731,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			goto spin_unlock;
 		}

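The list_splice_init() to list_splice_tail_init() switch above repeats across the next several at_xdmac hunks. The two <linux/list.h> helpers differ only in where the spliced entries land; the FIFO-recycling rationale below is my reading of the change, not stated in the diff:

#include <linux/list.h>

/*
 * Both move every entry of @src into @dst and reinitialise @src:
 *   list_splice_init(&src, &dst)      - entries land at the HEAD of dst
 *   list_splice_tail_init(&src, &dst) - entries land at the TAIL of dst
 *
 * Splicing to the tail keeps the free-descriptor list FIFO, so a
 * descriptor that has just completed is not immediately handed back out
 * while its contents might still be read (e.g. for residue reporting).
 */
static void return_descs(struct list_head *descs_list,
			 struct list_head *free_descs_list)
{
	list_splice_tail_init(descs_list, free_descs_list);
}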
@@ -819,7 +820,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
@@ -1053,8 +1055,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
 							       src_addr, dst_addr,
 							       xt, chunk);
 			if (!desc) {
-				list_splice_init(&first->descs_list,
-						 &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 				return NULL;
 			}

@@ -1134,7 +1136,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
-				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+				list_splice_tail_init(&first->descs_list,
+						      &atchan->free_descs_list);
 			return NULL;
 		}

@@ -1310,8 +1313,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
 						sg_dma_len(sg),
 						value);
 		if (!desc && first)
-			list_splice_init(&first->descs_list,
-					 &atchan->free_descs_list);
+			list_splice_tail_init(&first->descs_list,
+					      &atchan->free_descs_list);

 		if (!first)
 			first = desc;
@@ -1584,20 +1587,6 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	return ret;
 }

-/* Call must be protected by lock. */
-static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
-				 struct at_xdmac_desc *desc)
-{
-	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-
-	/*
-	 * Remove the transfer from the transfer list then move the transfer
-	 * descriptors into the free descriptors list.
-	 */
-	list_del(&desc->xfer_node);
-	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
-}
-
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc *desc;
@@ -1606,14 +1595,14 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 	 * If channel is enabled, do nothing, advance_work will be triggered
 	 * after the interruption.
 	 */
-	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
-		desc = list_first_entry(&atchan->xfers_list,
-					struct at_xdmac_desc,
-					xfer_node);
-		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		if (!desc->active_xfer)
-			at_xdmac_start_xfer(atchan, desc);
-	}
+	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
+		return;
+
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+	if (!desc->active_xfer)
+		at_xdmac_start_xfer(atchan, desc);
 }

 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1622,6 +1611,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 	struct dma_async_tx_descriptor	*txd;

 	spin_lock_irq(&atchan->lock);
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
 	if (list_empty(&atchan->xfers_list)) {
 		spin_unlock_irq(&atchan->lock);
 		return;
@@ -1634,6 +1625,7 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 }

+/* Called with atchan->lock held. */
 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
@@ -1652,8 +1644,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

-	spin_lock_irq(&atchan->lock);
-
 	/* Channel must be disabled first as it's not done automatically */
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1663,8 +1653,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 					struct at_xdmac_desc,
 					xfer_node);

-	spin_unlock_irq(&atchan->lock);
-
 	/* Print bad descriptor's details if needed */
 	dev_dbg(chan2dev(&atchan->chan),
 		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
@@ -1678,50 +1666,54 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
 {
 	struct at_xdmac_chan	*atchan = from_tasklet(atchan, t, tasklet);
 	struct at_xdmac_desc	*desc;
-	u32			error_mask;
-
-	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
-		__func__, atchan->irq_status);
-
-	error_mask = AT_XDMAC_CIS_RBEIS
-		     | AT_XDMAC_CIS_WBEIS
-		     | AT_XDMAC_CIS_ROIS;
-
-	if (at_xdmac_chan_is_cyclic(atchan)) {
-		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
-		   || (atchan->irq_status & error_mask)) {
-		struct dma_async_tx_descriptor  *txd;
-
-		if (atchan->irq_status & error_mask)
-			at_xdmac_handle_error(atchan);
-
-		spin_lock_irq(&atchan->lock);
-		desc = list_first_entry(&atchan->xfers_list,
-					struct at_xdmac_desc,
-					xfer_node);
-		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		if (!desc->active_xfer) {
-			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock_irq(&atchan->lock);
-			return;
-		}
-
-		txd = &desc->tx_dma_desc;
-
-		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock_irq(&atchan->lock);
-
-		dma_cookie_complete(txd);
-		if (txd->flags & DMA_PREP_INTERRUPT)
-			dmaengine_desc_get_callback_invoke(txd, NULL);
-
-		dma_run_dependencies(txd);
-
-		spin_lock_irq(&atchan->lock);
-		at_xdmac_advance_work(atchan);
-		spin_unlock_irq(&atchan->lock);
-	}
+	struct dma_async_tx_descriptor *txd;
+	u32			error_mask;
+
+	if (at_xdmac_chan_is_cyclic(atchan))
+		return at_xdmac_handle_cyclic(atchan);
+
+	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+		AT_XDMAC_CIS_ROIS;
+
+	spin_lock_irq(&atchan->lock);
+
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
+
+	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+	    !(atchan->irq_status & error_mask)) {
+		spin_unlock_irq(&atchan->lock);
+		return;
+	}
+
+	if (atchan->irq_status & error_mask)
+		at_xdmac_handle_error(atchan);
+
+	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+				xfer_node);
+	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+	if (!desc->active_xfer) {
+		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+		spin_unlock_irq(&atchan->lock);
+		return;
+	}
+
+	txd = &desc->tx_dma_desc;
+	dma_cookie_complete(txd);
+	/* Remove the transfer from the transfer list. */
+	list_del(&desc->xfer_node);
+	spin_unlock_irq(&atchan->lock);
+
+	if (txd->flags & DMA_PREP_INTERRUPT)
+		dmaengine_desc_get_callback_invoke(txd, NULL);
+
+	dma_run_dependencies(txd);
+
+	spin_lock_irq(&atchan->lock);
+	/* Move the xfer descriptors into the free descriptors list. */
+	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+	at_xdmac_advance_work(atchan);
+	spin_unlock_irq(&atchan->lock);
 }

 static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
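The tasklet rework above flattens the nested if/else and, more importantly, changes the locking shape: the descriptor is unlinked under the lock, the lock is dropped for the client callback, and only afterwards are the transfer's descriptors recycled. A condensed restatement of that pattern (names follow the hunk; the flow itself is the point):

spin_lock_irq(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
			xfer_node);
txd = &desc->tx_dma_desc;
dma_cookie_complete(txd);
list_del(&desc->xfer_node);		/* unlink while still locked */
spin_unlock_irq(&atchan->lock);

/* The client callback runs with no spinlock held, so it can resubmit
 * or inspect state without deadlocking against the channel lock. */
if (txd->flags & DMA_PREP_INTERRUPT)
	dmaengine_desc_get_callback_invoke(txd, NULL);

spin_lock_irq(&atchan->lock);
/* Recycle the descriptors only after nobody can still observe them. */
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
at_xdmac_advance_work(atchan);
spin_unlock_irq(&atchan->lock);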
@@ -1865,8 +1857,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		cpu_relax();

 	/* Cancel all pending transfers. */
-	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
-		at_xdmac_remove_xfer(atchan, desc);
+	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+		list_del(&desc->xfer_node);
+		list_splice_tail_init(&desc->descs_list,
+				      &atchan->free_descs_list);
+	}

 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
@@ -1949,8 +1944,7 @@ static void at_xdmac_axi_config(struct platform_device *pdev)
 	}
 }

-#ifdef CONFIG_PM
-static int atmel_xdmac_prepare(struct device *dev)
+static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
 {
 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
 	struct dma_chan		*chan, *_chan;
@@ -1964,12 +1958,8 @@ static int atmel_xdmac_prepare(struct device *dev)
 	}
 	return 0;
 }
-#else
-#	define atmel_xdmac_prepare NULL
-#endif

-#ifdef CONFIG_PM_SLEEP
-static int atmel_xdmac_suspend(struct device *dev)
+static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
 {
 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
 	struct dma_chan		*chan, *_chan;
@@ -1993,7 +1983,7 @@ static int atmel_xdmac_suspend(struct device *dev)
 	return 0;
 }

-static int atmel_xdmac_resume(struct device *dev)
+static int __maybe_unused atmel_xdmac_resume(struct device *dev)
 {
 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
 	struct at_xdmac_chan	*atchan;
@@ -2031,12 +2021,11 @@ static int atmel_xdmac_resume(struct device *dev)
 	}
 	return 0;
 }
-#endif /* CONFIG_PM_SLEEP */

 static int at_xdmac_probe(struct platform_device *pdev)
 {
 	struct at_xdmac	*atxdmac;
-	int		irq, size, nr_channels, i, ret;
+	int		irq, nr_channels, i, ret;
 	void __iomem	*base;
 	u32		reg;

@@ -2061,9 +2050,9 @@ static int at_xdmac_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}

-	size = sizeof(*atxdmac);
-	size += nr_channels * sizeof(struct at_xdmac_chan);
-	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	atxdmac = devm_kzalloc(&pdev->dev,
+			       struct_size(atxdmac, chan, nr_channels),
+			       GFP_KERNEL);
 	if (!atxdmac) {
 		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
 		return -ENOMEM;
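The allocation above replaces an open-coded size computation with struct_size() from <linux/overflow.h>. A minimal sketch of what the macro buys (the saturation behaviour is standard kernel semantics, not specific to this driver):

#include <linux/overflow.h>

struct at_xdmac {
	/* ... fixed fields ... */
	struct at_xdmac_chan	chan[];	/* flexible array member */
};

/*
 * struct_size(p, member, n) == sizeof(*p) + n * sizeof(p->member[0]),
 * computed with overflow checking: on overflow it saturates to SIZE_MAX,
 * so the allocation fails cleanly instead of being silently undersized.
 */
atxdmac = devm_kzalloc(&pdev->dev,
		       struct_size(atxdmac, chan, nr_channels),
		       GFP_KERNEL);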
@@ -2212,7 +2201,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 	return 0;
 }

-static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
+static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
 	.prepare	= atmel_xdmac_prepare,
 	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
 };
@@ -2236,7 +2225,7 @@ static struct platform_driver at_xdmac_driver = {
 	.driver = {
 		.name		= "at_xdmac",
 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
-		.pm		= &atmel_xdmac_dev_pm_ops,
+		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
 	}
 };

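Taken together, the PM hunks above replace #ifdef CONFIG_PM / CONFIG_PM_SLEEP fencing with __maybe_unused plus pm_ptr(), so the callbacks always compile (which is what the COMPILE_TEST-style cleanups in this commit are after) while the compiler discards them when power management is disabled. A generic sketch of the idiom, using a hypothetical "foo" driver:

/* Hypothetical "foo" driver; only the idiom is the point. */
static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* restore hardware state here */
}

static const struct dev_pm_ops __maybe_unused foo_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		/* pm_ptr() evaluates to NULL when CONFIG_PM is off,
		 * letting the unused pm ops be dropped at compile time. */
		.pm	= pm_ptr(&foo_pm_ops),
	},
};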
@@ -104,10 +104,10 @@
  * descriptor base address in the upper 8 bits.
  */
 struct jz4780_dma_hwdesc {
-	uint32_t dcm;
-	uint32_t dsa;
-	uint32_t dta;
-	uint32_t dtc;
+	u32 dcm;
+	u32 dsa;
+	u32 dta;
+	u32 dtc;
 };

 /* Size of allocations for hardware descriptor blocks. */
@@ -122,7 +122,8 @@ struct jz4780_dma_desc {
 	dma_addr_t desc_phys;
 	unsigned int count;
 	enum dma_transaction_type type;
-	uint32_t status;
+	u32 transfer_type;
+	u32 status;
 };

 struct jz4780_dma_chan {
@@ -130,8 +131,8 @@ struct jz4780_dma_chan {
 	unsigned int id;
 	struct dma_pool *desc_pool;

-	uint32_t transfer_type;
-	uint32_t transfer_shift;
+	u32 transfer_type_tx, transfer_type_rx;
+	u32 transfer_shift;
 	struct dma_slave_config	config;

 	struct jz4780_dma_desc *desc;
@@ -152,12 +153,12 @@ struct jz4780_dma_dev {
 	unsigned int irq;
 	const struct jz4780_dma_soc_data *soc_data;

-	uint32_t chan_reserved;
+	u32 chan_reserved;
 	struct jz4780_dma_chan chan[];
 };

 struct jz4780_dma_filter_data {
-	uint32_t transfer_type;
+	u32 transfer_type_tx, transfer_type_rx;
 	int channel;
 };

@@ -179,26 +180,26 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
 		dma_device);
 }

-static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
 	unsigned int chn, unsigned int reg)
 {
 	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
 }

 static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
-	unsigned int chn, unsigned int reg, uint32_t val)
+	unsigned int chn, unsigned int reg, u32 val)
 {
 	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
 }

-static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
 	unsigned int reg)
 {
 	return readl(jzdma->ctrl_base + reg);
 }

 static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
-	unsigned int reg, uint32_t val)
+	unsigned int reg, u32 val)
 {
 	writel(val, jzdma->ctrl_base + reg);
 }
@@ -226,9 +227,10 @@ static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
 		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
 }

-static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
-	struct jz4780_dma_chan *jzchan, unsigned int count,
-	enum dma_transaction_type type)
+static struct jz4780_dma_desc *
+jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
+		      enum dma_transaction_type type,
+		      enum dma_transfer_direction direction)
 {
 	struct jz4780_dma_desc *desc;

@@ -248,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc(

 	desc->count = count;
 	desc->type = type;
+
+	if (direction == DMA_DEV_TO_MEM)
+		desc->transfer_type = jzchan->transfer_type_rx;
+	else
+		desc->transfer_type = jzchan->transfer_type_tx;
+
 	return desc;
 }

@@ -260,8 +268,8 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(desc);
 }

-static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
-	unsigned long val, uint32_t *shift)
+static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+	unsigned long val, u32 *shift)
 {
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	int ord = ffs(val) - 1;
@@ -303,7 +311,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	enum dma_transfer_direction direction)
 {
 	struct dma_slave_config *config = &jzchan->config;
-	uint32_t width, maxburst, tsz;
+	u32 width, maxburst, tsz;

 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
@@ -361,7 +369,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 	unsigned int i;
 	int err;

-	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
 	if (!desc)
 		return NULL;

@@ -410,7 +418,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(

 	periods = buf_len / period_len;

-	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
 	if (!desc)
 		return NULL;

@@ -453,16 +461,16 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
-	uint32_t tsz;
+	u32 tsz;

-	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
 	if (!desc)
 		return NULL;

 	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
 				       &jzchan->transfer_shift);

-	jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+	desc->transfer_type = JZ_DMA_DRT_AUTO;

 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;
@@ -528,7 +536,7 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)

 		/* Set transfer type. */
 		jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
-				      jzchan->transfer_type);
+				      jzchan->desc->transfer_type);

 		/*
 		 * Set the transfer count. This is redundant for a descriptor-driven
@@ -670,7 +678,7 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
 {
 	const unsigned int soc_flags = jzdma->soc_data->flags;
 	struct jz4780_dma_desc *desc = jzchan->desc;
-	uint32_t dcs;
+	u32 dcs;
 	bool ack = true;

 	spin_lock(&jzchan->vchan.lock);
@@ -727,7 +735,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 	struct jz4780_dma_dev *jzdma = data;
 	unsigned int nb_channels = jzdma->soc_data->nb_channels;
 	unsigned long pending;
-	uint32_t dmac;
+	u32 dmac;
 	int i;

 	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
@@ -788,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
 		return false;
 	}

-	jzchan->transfer_type = data->transfer_type;
+	jzchan->transfer_type_tx = data->transfer_type_tx;
+	jzchan->transfer_type_rx = data->transfer_type_rx;

 	return true;
 }
@@ -800,11 +809,17 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
 	struct jz4780_dma_filter_data data;

-	if (dma_spec->args_count != 2)
+	if (dma_spec->args_count == 2) {
+		data.transfer_type_tx = dma_spec->args[0];
+		data.transfer_type_rx = dma_spec->args[0];
+		data.channel = dma_spec->args[1];
+	} else if (dma_spec->args_count == 3) {
+		data.transfer_type_tx = dma_spec->args[0];
+		data.transfer_type_rx = dma_spec->args[1];
+		data.channel = dma_spec->args[2];
+	} else {
 		return NULL;
-
-	data.transfer_type = dma_spec->args[0];
-	data.channel = dma_spec->args[1];
+	}

 	if (data.channel > -1) {
 		if (data.channel >= jzdma->soc_data->nb_channels) {
@@ -822,7 +837,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 			return NULL;
 		}

-		jzdma->chan[data.channel].transfer_type = data.transfer_type;
+		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
+		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

 		return dma_get_slave_channel(
 			&jzdma->chan[data.channel].vchan.chan);
@@ -915,6 +931,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->max_sg_burst = JZ_DMA_MAX_DESC;

 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
@@ -937,6 +954,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		jzchan->vchan.desc_free = jz4780_dma_desc_free;
 	}

+	/*
+	 * On JZ4760, chan0 won't enable properly the first time.
+	 * Enabling then disabling chan1 will magically make chan0 work
+	 * correctly.
+	 */
+	jz4780_dma_chan_enable(jzdma, 1);
+	jz4780_dma_chan_disable(jzdma, 1);
+
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		goto err_disable_clk;
@@ -1010,12 +1035,36 @@ static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
 	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
 };

+static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
+	.nb_channels = 2,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
+	.nb_channels = 3,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
 static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
 	.nb_channels = 5,
 	.transfer_ord_max = 6,
 	.flags = JZ_SOC_DATA_PER_CHAN_PM,
 };

+static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
+	.nb_channels = 2,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
+	.nb_channels = 3,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
 static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
 	.nb_channels = 6,
 	.transfer_ord_max = 6,
@@ -1044,7 +1093,11 @@ static const struct of_device_id jz4780_dma_dt_match[] = {
 	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
 	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
 	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
+	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
 	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
+	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
+	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
 	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
 	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
 	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
@@ -695,13 +695,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
-	int err = -EBUSY;
-
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);

 	if (chan->client_count == 0) {
 		struct dma_device *device = chan->device;
+		int err;

 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 		device->privatecnt++;
@@ -1160,6 +1159,13 @@ int dma_async_device_register(struct dma_device *device)
 		return -EIO;
 	}

+	if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY_SG");
+		return -EIO;
+	}
+
 	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
 		dev_err(device->dev,
 			"Device claims capability %s, but op is not defined\n",
@@ -79,6 +79,32 @@ axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
 	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
 }

+static inline void axi_chan_config_write(struct axi_dma_chan *chan,
+					 struct axi_dma_chan_config *config)
+{
+	u32 cfg_lo, cfg_hi;
+
+	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
+		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
+			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
+			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
+			 config->src_per << CH_CFG_H_SRC_PER_POS |
+			 config->dst_per << CH_CFG_H_DST_PER_POS |
+			 config->prior << CH_CFG_H_PRIORITY_POS;
+	} else {
+		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
+			  config->dst_per << CH_CFG2_L_DST_PER_POS;
+		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
+			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
+			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
+			 config->prior << CH_CFG2_H_PRIORITY_POS;
+	}
+	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
+	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
+}
+
 static inline void axi_dma_disable(struct axi_dma_chip *chip)
 {
 	u32 val;
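The new helper centralises CH_CFG programming: callers describe the channel in a struct axi_dma_chan_config and the helper encodes it for either the legacy (up to 8 channels) or the CFG2 register layout. Usage, mirroring a later hunk in this same commit:

struct axi_dma_chan_config config = {};

/* Describe the transfer once, independent of the register map... */
config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;

/* ...and let the helper pick whichever layout the IP instance has. */
axi_chan_config_write(chan, &config);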
@@ -154,7 +180,10 @@ static inline void axi_chan_disable(struct axi_dma_chan *chan)

 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
-	val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	if (chan->chip->dw->hdata->reg_map_8_channels)
+		val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	else
+		val |=   BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
 	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 }

@@ -163,8 +192,12 @@ static inline void axi_chan_enable(struct axi_dma_chan *chan)
 	u32 val;

 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
-	       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	if (chan->chip->dw->hdata->reg_map_8_channels)
+		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	else
+		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
 	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 }

@@ -179,12 +212,16 @@ static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)

 static void axi_dma_hw_init(struct axi_dma_chip *chip)
 {
+	int ret;
 	u32 i;

 	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
 		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
 		axi_chan_disable(&chip->dw->chan[i]);
 	}
+	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
+	if (ret)
+		dev_warn(chip->dev, "Unable to set coherent mask\n");
 }

 static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
@@ -336,7 +373,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 				      struct axi_dma_desc *first)
 {
 	u32 priority = chan->chip->dw->hdata->priority[chan->id];
-	u32 reg, irq_mask;
+	struct axi_dma_chan_config config = {};
+	u32 irq_mask;
 	u8 lms = 0; /* Select AXI0 master for LLI fetching */

 	if (unlikely(axi_chan_is_hw_enable(chan))) {
@@ -348,36 +386,36 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,

 	axi_dma_enable(chan->chip);

-	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
-	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
-	axi_chan_iowrite32(chan, CH_CFG_L, reg);
-
-	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
-	       priority << CH_CFG_H_PRIORITY_POS |
-	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
-	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
+	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
+	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
+	config.prior = priority;
+	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;

 	switch (chan->direction) {
 	case DMA_MEM_TO_DEV:
 		dw_axi_dma_set_byte_halfword(chan, true);
-		reg |= (chan->config.device_fc ?
-			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
-			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
-			<< CH_CFG_H_TT_FC_POS;
+		config.tt_fc = chan->config.device_fc ?
+				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
+				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
 		if (chan->chip->apb_regs)
-			reg |= (chan->id << CH_CFG_H_DST_PER_POS);
+			config.dst_per = chan->id;
+		else
+			config.dst_per = chan->hw_handshake_num;
 		break;
 	case DMA_DEV_TO_MEM:
-		reg |= (chan->config.device_fc ?
-			DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
-			DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
-			<< CH_CFG_H_TT_FC_POS;
+		config.tt_fc = chan->config.device_fc ?
+				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
+				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
 		if (chan->chip->apb_regs)
-			reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
+			config.src_per = chan->id;
+		else
+			config.src_per = chan->hw_handshake_num;
 		break;
 	default:
 		break;
 	}
-	axi_chan_iowrite32(chan, CH_CFG_H, reg);
+	axi_chan_config_write(chan, &config);

 	write_chan_llp(chan, first->hw_desc[0].llp | lms);

@@ -1120,10 +1158,16 @@ static int dma_chan_pause(struct dma_chan *dchan)

 	spin_lock_irqsave(&chan->vc.lock, flags);

-	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
-	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
-	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
+		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	} else {
+		val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+		      BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+	}

 	do {
 		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
@@ -1147,9 +1191,15 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 	u32 val;

 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
-	val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
-	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
+		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	} else {
+		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+	}

 	chan->is_paused = false;
 }
@@ -1241,6 +1291,8 @@ static int parse_device_properties(struct axi_dma_chip *chip)
 		return -EINVAL;

 	chip->dw->hdata->nr_channels = tmp;
+	if (tmp <= DMA_REG_MAP_CH_REF)
+		chip->dw->hdata->reg_map_8_channels = true;

 	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
 	if (ret)
@@ -18,7 +18,7 @@

 #include "../virt-dma.h"

-#define DMAC_MAX_CHANNELS	8
+#define DMAC_MAX_CHANNELS	16
 #define DMAC_MAX_MASTERS	2
 #define DMAC_MAX_BLK_SIZE	0x200000

@@ -30,6 +30,8 @@ struct dw_axi_dma_hcfg {
 	u32	priority[DMAC_MAX_CHANNELS];
 	/* maximum supported axi burst length */
 	u32	axi_rw_burst_len;
+	/* Register map for DMAX_NUM_CHANNELS <= 8 */
+	bool	reg_map_8_channels;
 	bool	restrict_axi_burst_len;
 };

@@ -103,6 +105,17 @@ struct axi_dma_desc {
 	u32				period_len;
 };

+struct axi_dma_chan_config {
+	u8 dst_multblk_type;
+	u8 src_multblk_type;
+	u8 dst_per;
+	u8 src_per;
+	u8 tt_fc;
+	u8 prior;
+	u8 hs_sel_dst;
+	u8 hs_sel_src;
+};
+
 static inline struct device *dchan2dev(struct dma_chan *dchan)
 {
 	return &dchan->dev->device;
@@ -139,6 +152,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define DMAC_CHEN		0x018 /* R/W DMAC Channel Enable */
 #define DMAC_CHEN_L		0x018 /* R/W DMAC Channel Enable 00-31 */
 #define DMAC_CHEN_H		0x01C /* R/W DMAC Channel Enable 32-63 */
+#define DMAC_CHSUSPREG		0x020 /* R/W DMAC Channel Suspend */
+#define DMAC_CHABORTREG		0x028 /* R/W DMAC Channel Abort */
 #define DMAC_INTSTATUS		0x030 /* R DMAC Interrupt Status */
 #define DMAC_COMMON_INTCLEAR	0x038 /* W DMAC Interrupt Clear */
 #define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */
@@ -187,6 +202,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define DMA_APB_HS_SEL_BIT_SIZE	0x08 /* HW handshake bits per channel */
 #define DMA_APB_HS_SEL_MASK	0xFF /* HW handshake select masks */
 #define MAX_BLOCK_SIZE		0x1000 /* 1024 blocks * 4 bytes data width */
+#define DMA_REG_MAP_CH_REF	0x08 /* Channel count to choose register map */

 /* DMAC_CFG */
 #define DMAC_EN_POS			0
@@ -195,12 +211,20 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define INT_EN_POS			1
 #define INT_EN_MASK			BIT(INT_EN_POS)

+/* DMAC_CHEN */
 #define DMAC_CHAN_EN_SHIFT		0
 #define DMAC_CHAN_EN_WE_SHIFT		8

 #define DMAC_CHAN_SUSP_SHIFT		16
 #define DMAC_CHAN_SUSP_WE_SHIFT		24

+/* DMAC_CHEN2 */
+#define DMAC_CHAN_EN2_WE_SHIFT		16
+
+/* DMAC_CHSUSP */
+#define DMAC_CHAN_SUSP2_SHIFT		0
+#define DMAC_CHAN_SUSP2_WE_SHIFT	16
+
 /* CH_CTL_H */
 #define CH_CTL_H_ARLEN_EN		BIT(6)
 #define CH_CTL_H_ARLEN_POS		7
@@ -289,6 +313,15 @@ enum {
 	DWAXIDMAC_MBLK_TYPE_LL
 };

+/* CH_CFG2 */
+#define CH_CFG2_L_SRC_PER_POS		4
+#define CH_CFG2_L_DST_PER_POS		11
+
+#define CH_CFG2_H_TT_FC_POS		0
+#define CH_CFG2_H_HS_SEL_SRC_POS	3
+#define CH_CFG2_H_HS_SEL_DST_POS	4
+#define CH_CFG2_H_PRIORITY_POS		20
+
 /**
  * DW AXI DMA channel interrupts
  *
|
@ -249,7 +249,6 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan)
|
|||||||
{
|
{
|
||||||
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
|
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
|
||||||
int err = 0;
|
int err = 0;
|
||||||
LIST_HEAD(head);
|
|
||||||
|
|
||||||
if (!chan->configured) {
|
if (!chan->configured) {
|
||||||
/* Do nothing */
|
/* Do nothing */
|
||||||
|
@@ -186,27 +186,10 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_set_master(pdev);

 	/* DMA configuration */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			pci_err(pdev, "consistent DMA mask 64 set failed\n");
-			return err;
-		}
-	} else {
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
 		pci_err(pdev, "DMA mask 64 set failed\n");
-
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			pci_err(pdev, "DMA mask 32 set failed\n");
-			return err;
-		}
-
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			pci_err(pdev, "consistent DMA mask 32 set failed\n");
-			return err;
-		}
+		return err;
 	}

 	/* Data structure allocation */
@@ -32,11 +32,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);

-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;

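This hunk, the dw-edma-pcie one above, and the hisi_dma and hsu hunks further down are the same mechanical conversion: the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair collapses into one dma_set_mask_and_coherent() call on &pdev->dev. The general shape, with the optional narrower-mask fallback that some drivers keep (none of the hunks here need it):

#include <linux/dma-mapping.h>

/* One call sets both the streaming and the coherent DMA mask. */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
	return err;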
@@ -348,6 +348,7 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 	struct fsl_edma_engine *edma = fsl_chan->edma;
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
+	u16 csr = 0;

 	/*
 	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
@@ -373,6 +374,12 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 	edma_writel(edma, (s32)tcd->dlast_sga,
 			&regs->tcd[ch].dlast_sga);

+	if (fsl_chan->is_sw) {
+		csr = le16_to_cpu(tcd->csr);
+		csr |= EDMA_TCD_CSR_START;
+		tcd->csr = cpu_to_le16(csr);
+	}
+
 	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
 }
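Why the is_sw branch: a memcpy channel has no peripheral request line, so nothing would ever trigger the transfer. Setting the START bit in the TCD control/status word makes the eDMA engine kick the transfer by software as soon as the TCD is programmed. That is my reading of the hunk, consistent with the eDMA reference manuals; annotated:

/* TCD fields are little-endian in memory; patch CSR before it is
 * written to the hardware so the engine starts without waiting for
 * a hardware DMA request. */
u16 csr = le16_to_cpu(tcd->csr);

csr |= EDMA_TCD_CSR_START;	/* software-triggered start */
tcd->csr = cpu_to_le16(csr);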
@@ -587,6 +594,29 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 }
 EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

+struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
+						     dma_addr_t dma_dst, dma_addr_t dma_src,
+						     size_t len, unsigned long flags)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_desc *fsl_desc;
+
+	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
+	if (!fsl_desc)
+		return NULL;
+	fsl_desc->iscyclic = false;
+
+	fsl_chan->is_sw = true;
+
+	/* To match with copy_align and max_seg_size so 1 tcd is enough */
+	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+			EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+			32, len, 0, 1, 1, 32, 0, true, true, false);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
+
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
 {
 	struct virt_dma_desc *vdesc;
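With DMA_MEMCPY wired up here (and the capability, prep hook, copy_align and max_seg_size set in the probe hunks below), a client can drive the eDMA through the generic dmaengine API. A hedged client-side sketch using only generic dmaengine calls; dst and src are DMA addresses the caller has already mapped, and completion handling is omitted:

#include <linux/dmaengine.h>

static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* Completion would normally be awaited via a callback or by
	 * polling dma_async_is_tx_complete(); skipped in this sketch. */
	return dma_submit_error(cookie) ? -EIO : 0;
}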
@@ -638,12 +668,14 @@ EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
 void fsl_edma_free_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_engine *edma = fsl_chan->edma;
 	unsigned long flags;
 	LIST_HEAD(head);

 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
 	fsl_edma_disable_request(fsl_chan);
-	fsl_edma_chan_mux(fsl_chan, 0, false);
+	if (edma->drvdata->dmamuxs)
+		fsl_edma_chan_mux(fsl_chan, 0, false);
 	fsl_chan->edesc = NULL;
 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
 	fsl_edma_unprep_slave_dma(fsl_chan);
@@ -652,6 +684,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
 	dma_pool_destroy(fsl_chan->tcd_pool);
 	fsl_chan->tcd_pool = NULL;
+	fsl_chan->is_sw = false;
 }
 EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

@@ -121,6 +121,7 @@ struct fsl_edma_chan {
 	struct fsl_edma_desc		*edesc;
 	struct dma_slave_config		cfg;
 	u32				attr;
+	bool				is_sw;
 	struct dma_pool			*tcd_pool;
 	dma_addr_t			dma_dev_addr;
 	u32				dma_dev_size;
@@ -240,6 +241,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context);
+struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
+		size_t len, unsigned long flags);
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
 void fsl_edma_issue_pending(struct dma_chan *chan);
 int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
@@ -17,6 +17,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_dma.h>
+#include <linux/dma-mapping.h>

 #include "fsl-edma-common.h"

|
|||||||
dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
|
dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
|
||||||
dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
|
dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
|
||||||
dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
|
dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
|
||||||
|
dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);
|
||||||
|
|
||||||
fsl_edma->dma_dev.dev = &pdev->dev;
|
fsl_edma->dma_dev.dev = &pdev->dev;
|
||||||
fsl_edma->dma_dev.device_alloc_chan_resources
|
fsl_edma->dma_dev.device_alloc_chan_resources
|
||||||
@ -381,6 +383,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
|
|||||||
fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
|
fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
|
||||||
fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
|
fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
|
||||||
fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
|
fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
|
||||||
|
fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
|
||||||
fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
|
fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
|
||||||
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
|
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
|
||||||
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
|
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
|
||||||
@ -392,6 +395,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
|
|||||||
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
|
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
|
||||||
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
|
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
|
||||||
|
|
||||||
|
fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
|
||||||
|
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
|
||||||
|
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
|
||||||
|
|
||||||
platform_set_drvdata(pdev, fsl_edma);
|
platform_set_drvdata(pdev, fsl_edma);
|
||||||
|
|
||||||
ret = dma_async_device_register(&fsl_edma->dma_dev);
|
ret = dma_async_device_register(&fsl_edma->dma_dev);
|
||||||
|
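
Note: with DMA_MEMCPY advertised and device_prep_dma_memcpy wired to fsl_edma_prep_memcpy above, any kernel client can reach the engine through the generic dmaengine API. A minimal client sketch follows; the synchronous dma_sync_wait() and the function name are illustrative assumptions rather than part of this commit, and a real caller must keep each transfer within the 0x3fff max_seg_size configured above.

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static int edma_memcpy_sketch(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* Grab any channel that advertises memcpy capability. */
		chan = dma_request_chan_by_mask(&mask);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie)) {
			dma_release_channel(chan);
			return -EIO;
		}

		/* Kick the engine and, for the sketch, wait synchronously. */
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);
		dma_release_channel(chan);
		return 0;
	}
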
@@ -519,11 +519,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return ret;
 	}
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
 
@@ -65,11 +65,7 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
 
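
Note: both hunks above are the same mechanical conversion. The deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair collapses into one dma_set_mask_and_coherent() call on the underlying struct device, which sets the streaming and coherent masks together so a single error check suffices. The shape of the conversion as a standalone sketch (64-bit mask as in the hisi hunk; hsu uses a 32-bit one):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int set_dma_masks_sketch(struct pci_dev *pdev)
	{
		/* One call covers both masks; one check replaces the old two. */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
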
@@ -19,30 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
-	pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
-	struct pci_dev *pdev = idxd->pdev;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
-
-	for (i = 0; i < msixcnt; i++)
-		idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
-	pci_msi_unmask_irq(data);
-}
-
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
 {
 	union genctrl_reg genctrl;
@@ -135,8 +111,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	int rc, num_descs, i;
-	int align;
-	u64 tmp;
 
 	if (wq->type != IDXD_WQT_KERNEL)
 		return 0;
@@ -148,21 +122,13 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	if (rc < 0)
 		return rc;
 
-	align = idxd->data->align;
-	wq->compls_size = num_descs * idxd->data->compl_size + align;
-	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
-					    &wq->compls_addr_raw, GFP_KERNEL);
-	if (!wq->compls_raw) {
+	wq->compls_size = num_descs * idxd->data->compl_size;
+	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
+	if (!wq->compls) {
 		rc = -ENOMEM;
 		goto fail_alloc_compls;
 	}
 
-	/* Adjust alignment */
-	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
-	tmp = (u64)wq->compls_raw;
-	tmp = (tmp + (align - 1)) & ~(align - 1);
-	wq->compls = (struct dsa_completion_record *)tmp;
-
 	rc = alloc_descs(wq, num_descs);
 	if (rc < 0)
 		goto fail_alloc_descs;
@@ -191,8 +157,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 fail_sbitmap_init:
 	free_descs(wq);
 fail_alloc_descs:
-	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
-			  wq->compls_addr_raw);
+	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
 	free_hw_descs(wq);
 	return rc;
@@ -207,8 +172,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
 
 	free_hw_descs(wq);
 	free_descs(wq);
-	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
-			  wq->compls_addr_raw);
+	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 	sbitmap_queue_free(&wq->sbq);
 }
 
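
Note: the allocation hunks above lean on a documented DMA API guarantee: dma_alloc_coherent() returns memory aligned to the smallest PAGE_SIZE order greater than or equal to the requested size, which already satisfies the completion-record alignment, so the compls_raw/compls_addr_raw shadow fields and the manual round-up can go. A sketch of the simplified pairing under that assumption (struct and function names are illustrative):

	#include <linux/dma-mapping.h>

	/* Hypothetical pool mirroring the wq->compls* fields above. */
	struct compl_pool {
		void *cpu;	/* wq->compls */
		dma_addr_t dma;	/* wq->compls_addr */
		size_t size;	/* wq->compls_size */
	};

	static int compl_pool_alloc(struct device *dev, struct compl_pool *p,
				    int num_descs, size_t compl_size)
	{
		p->size = num_descs * compl_size;
		/* Naturally aligned; no manual rounding needed. */
		p->cpu = dma_alloc_coherent(dev, p->size, &p->dma, GFP_KERNEL);
		return p->cpu ? 0 : -ENOMEM;
	}

	static void compl_pool_free(struct device *dev, struct compl_pool *p)
	{
		dma_free_coherent(dev, p->size, p->cpu, p->dma);
	}
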
@@ -397,9 +361,12 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	wq->threshold = 0;
 	wq->priority = 0;
 	wq->ats_dis = 0;
+	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
+	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+	wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
 }
 
 static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
@@ -423,17 +390,29 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
 	int rc;
 
 	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
-	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
+			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
 	if (rc < 0)
 		return rc;
 	reinit_completion(&wq->wq_dead);
+	reinit_completion(&wq->wq_resurrect);
 	return 0;
 }
 
+void __idxd_wq_quiesce(struct idxd_wq *wq)
+{
+	lockdep_assert_held(&wq->wq_lock);
+	reinit_completion(&wq->wq_resurrect);
+	percpu_ref_kill(&wq->wq_active);
+	complete_all(&wq->wq_resurrect);
+	wait_for_completion(&wq->wq_dead);
+}
+
 void idxd_wq_quiesce(struct idxd_wq *wq)
 {
-	percpu_ref_kill(&wq->wq_active);
-	wait_for_completion(&wq->wq_dead);
+	mutex_lock(&wq->wq_lock);
+	__idxd_wq_quiesce(wq);
+	mutex_unlock(&wq->wq_lock);
 }
 
 /* Device control bits */
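
Note: the quiesce rework above is an instance of a general percpu_ref pattern: initialize with PERCPU_REF_ALLOW_REINIT so the ref can be revived after a kill, kill it so new tryget()s fail, signal wq_resurrect so blocked submitters bail out, then wait on wq_dead, which the ref's release callback completes once the last in-flight reference drops. Reduced to its essentials (names illustrative):

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	struct queue {
		struct percpu_ref active;
		struct completion dead;		/* completed by the release cb */
		struct completion resurrect;	/* wakes blocked submitters */
	};

	static void queue_release(struct percpu_ref *ref)
	{
		struct queue *q = container_of(ref, struct queue, active);

		complete_all(&q->dead);
	}

	static int queue_init(struct queue *q)
	{
		init_completion(&q->dead);
		init_completion(&q->resurrect);
		/* ALLOW_REINIT permits percpu_ref_reinit() after a kill. */
		return percpu_ref_init(&q->active, queue_release,
				       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	}

	static void queue_quiesce(struct queue *q)
	{
		reinit_completion(&q->resurrect);
		percpu_ref_kill(&q->active);	/* new tryget()s now fail */
		complete_all(&q->resurrect);	/* release anyone blocked */
		wait_for_completion(&q->dead);	/* drain in-flight users */
	}
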
@@ -591,7 +570,6 @@ void idxd_device_reset(struct idxd_device *idxd)
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
 	idxd_unmask_error_interrupts(idxd);
-	idxd_msix_perm_setup(idxd);
 	spin_unlock(&idxd->dev_lock);
 }
 
@@ -700,9 +678,9 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
 		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
 		group->num_engines = 0;
 		group->num_wqs = 0;
-		group->use_token_limit = false;
-		group->tokens_allowed = 0;
-		group->tokens_reserved = 0;
+		group->use_rdbuf_limit = false;
+		group->rdbufs_allowed = 0;
+		group->rdbufs_reserved = 0;
 		group->tc_a = -1;
 		group->tc_b = -1;
 	}
@@ -731,36 +709,6 @@ void idxd_device_clear_state(struct idxd_device *idxd)
 	idxd_device_wqs_clear_state(idxd);
 }
 
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	mperm.pasid = idxd->pasid;
-	mperm.pasid_en = device_pasid_enabled(idxd);
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
 static void idxd_group_config_write(struct idxd_group *group)
 {
 	struct idxd_device *idxd = group->idxd;
@@ -800,10 +748,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
 	int i;
 	struct device *dev = &idxd->pdev->dev;
 
-	/* Setup bandwidth token limit */
-	if (idxd->token_limit) {
+	/* Setup bandwidth rdbuf limit */
+	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
 		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
-		reg.token_limit = idxd->token_limit;
+		reg.rdbuf_limit = idxd->rdbuf_limit;
 		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 	}
 
@@ -847,15 +795,12 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
 	}
 
+	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
+		wq->size = WQ_DEFAULT_QUEUE_DEPTH;
+
 	/* byte 0-3 */
 	wq->wqcfg->wq_size = wq->size;
 
-	if (wq->size == 0) {
-		idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
-		dev_warn(dev, "Incorrect work queue size: 0\n");
-		return -EINVAL;
-	}
-
 	/* bytes 4-7 */
 	wq->wqcfg->wq_thresh = wq->threshold;
 
@@ -944,13 +889,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
 			group->tc_b = group->grpcfg.flags.tc_b = 1;
 		else
 			group->grpcfg.flags.tc_b = group->tc_b;
-		group->grpcfg.flags.use_token_limit = group->use_token_limit;
-		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
-		if (group->tokens_allowed)
-			group->grpcfg.flags.tokens_allowed =
-				group->tokens_allowed;
+		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
+		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
+		if (group->rdbufs_allowed)
+			group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
 		else
-			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+			group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
 	}
 }
 
@@ -1001,8 +945,6 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
 
 		if (!wq->group)
 			continue;
-		if (!wq->size)
-			continue;
 
 		if (wq_shared(wq) && !device_swq_supported(idxd)) {
 			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
@@ -1060,8 +1002,6 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
 
 	wq->size = wq->wqcfg->wq_size;
 	wq->threshold = wq->wqcfg->wq_thresh;
-	if (wq->wqcfg->priv)
-		wq->type = IDXD_WQT_KERNEL;
 
 	/* The driver does not support shared WQ mode in read-only config yet */
 	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
@@ -1145,7 +1085,7 @@ int idxd_device_load_config(struct idxd_device *idxd)
 	int i, rc;
 
 	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
-	idxd->token_limit = reg.token_limit;
+	idxd->rdbuf_limit = reg.rdbuf_limit;
 
 	for (i = 0; i < idxd->max_groups; i++) {
 		struct idxd_group *group = idxd->groups[i];
@@ -1164,6 +1104,106 @@ int idxd_device_load_config(struct idxd_device *idxd)
 	return 0;
 }
 
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+	struct idxd_desc *desc, *itr;
+	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
+
+	spin_lock(&ie->list_lock);
+	head = llist_del_all(&ie->pending_llist);
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
+
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
+
+	list_for_each_entry_safe(desc, itr, &flist, list) {
+		list_del(&desc->list);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
+	}
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+				       struct idxd_irq_entry *ie)
+{
+	union msix_perm mperm;
+
+	if (ie->pasid == INVALID_IOASID)
+		return;
+
+	mperm.bits = 0;
+	mperm.pasid = ie->pasid;
+	mperm.pasid_en = 1;
+	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+					 struct idxd_irq_entry *ie)
+{
+	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_irq_entry *ie = &wq->ie;
+
+	synchronize_irq(ie->vector);
+	free_irq(ie->vector, ie);
+	idxd_flush_pending_descs(ie);
+	if (idxd->request_int_handles)
+		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->vector = -1;
+	ie->int_handle = INVALID_INT_HANDLE;
+	ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	struct device *dev = &pdev->dev;
+	struct idxd_irq_entry *ie;
+	int rc;
+
+	ie = &wq->ie;
+	ie->vector = pci_irq_vector(pdev, ie->id);
+	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+	idxd_device_set_perm_entry(idxd, ie);
+
+	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+	if (rc < 0) {
+		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+		goto err_irq;
+	}
+
+	if (idxd->request_int_handles) {
+		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+						    IDXD_IRQ_MSIX);
+		if (rc < 0)
+			goto err_int_handle;
+	} else {
+		ie->int_handle = ie->id;
+	}
+
+	return 0;
+
+err_int_handle:
+	ie->int_handle = INVALID_INT_HANDLE;
+	free_irq(ie->vector, ie);
+err_irq:
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->pasid = INVALID_IOASID;
+	return rc;
+}
+
 int __drv_enable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
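
Note: idxd_wq_request_irq()/idxd_wq_free_irq() above move interrupt ownership from device probe to the point where a kernel work queue is actually enabled, with the error unwind mirroring setup in reverse (interrupt handle, then irq, then MSI-X permission entry). The caller-side pairing, sketched with a hypothetical wrapper; the real callers are the dmaengine probe/remove hunks further down:

	/* enable_wq_and_register() is a placeholder for the real
	 * __drv_enable_wq()/channel registration sequence. */
	static int wq_attach_sketch(struct idxd_wq *wq)
	{
		int rc;

		rc = idxd_wq_request_irq(wq);	/* vector, perm entry, handle */
		if (rc < 0)
			return rc;

		rc = enable_wq_and_register(wq);
		if (rc < 0)
			idxd_wq_free_irq(wq);	/* tear down in reverse */
		return rc;
	}
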
@@ -21,20 +21,27 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
 }
 
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type)
+			   enum idxd_complete_type comp_type,
+			   bool free_desc)
 {
+	struct idxd_device *idxd = desc->wq->idxd;
 	struct dma_async_tx_descriptor *tx;
 	struct dmaengine_result res;
 	int complete = 1;
 
-	if (desc->completion->status == DSA_COMP_SUCCESS)
+	if (desc->completion->status == DSA_COMP_SUCCESS) {
 		res.result = DMA_TRANS_NOERROR;
-	else if (desc->completion->status)
+	} else if (desc->completion->status) {
+		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
+		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
+		    idxd_queue_int_handle_resubmit(desc))
+			return;
 		res.result = DMA_TRANS_WRITE_FAILED;
-	else if (comp_type == IDXD_COMPLETE_ABORT)
+	} else if (comp_type == IDXD_COMPLETE_ABORT) {
 		res.result = DMA_TRANS_ABORTED;
-	else
+	} else {
 		complete = 0;
+	}
 
 	tx = &desc->txd;
 	if (complete && tx->cookie) {
@@ -44,6 +51,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc,
 		tx->callback = NULL;
 		tx->callback_result = NULL;
 	}
+
+	if (free_desc)
+		idxd_free_desc(desc->wq, desc);
 }
 
 static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -153,8 +163,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 
 	rc = idxd_submit_desc(wq, desc);
-	if (rc < 0)
+	if (rc < 0) {
+		idxd_free_desc(wq, desc);
 		return rc;
+	}
 
 	return cookie;
 }
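
Note: the two dma.c hunks above pin down descriptor ownership. With free_desc set, idxd_dma_complete_txd() frees the descriptor itself, except when it hands it off to the int-handle resubmit path; and when idxd_submit_desc() fails, nothing downstream will ever complete the descriptor, so the submitter must reclaim it. The rule restated as a sketch:

	/* Ownership sketch: a failed submit leaves the descriptor with the
	 * caller; a successful one transfers it to the completion path,
	 * which frees it via idxd_dma_complete_txd(..., true). */
	static dma_cookie_t submit_sketch(struct idxd_wq *wq, struct idxd_desc *desc,
					  struct dma_async_tx_descriptor *tx)
	{
		dma_cookie_t cookie = dma_cookie_assign(tx);

		if (idxd_submit_desc(wq, desc) < 0) {
			idxd_free_desc(wq, desc);	/* reclaim on failure */
			return -EIO;
		}
		return cookie;
	}
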
@@ -277,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 
 	mutex_lock(&wq->wq_lock);
 	wq->type = IDXD_WQT_KERNEL;
+
+	rc = idxd_wq_request_irq(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+		goto err_irq;
+	}
+
 	rc = __drv_enable_wq(wq);
 	if (rc < 0) {
 		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
@@ -310,13 +330,15 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 	return 0;
 
 err_dma:
-	idxd_wq_quiesce(wq);
+	__idxd_wq_quiesce(wq);
 	percpu_ref_exit(&wq->wq_active);
 err_ref:
 	idxd_wq_free_resources(wq);
 err_res_alloc:
 	__drv_disable_wq(wq);
 err:
+	idxd_wq_free_irq(wq);
+err_irq:
 	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 	return rc;
@@ -327,11 +349,13 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
 	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
 
 	mutex_lock(&wq->wq_lock);
-	idxd_wq_quiesce(wq);
+	__idxd_wq_quiesce(wq);
 	idxd_unregister_dma_channel(wq);
 	idxd_wq_free_resources(wq);
 	__drv_disable_wq(wq);
 	percpu_ref_exit(&wq->wq_active);
+	idxd_wq_free_irq(wq);
+	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 }
 
@@ -10,6 +10,7 @@
 #include <linux/cdev.h>
 #include <linux/idr.h>
 #include <linux/pci.h>
+#include <linux/ioasid.h>
 #include <linux/perf_event.h>
 #include <uapi/linux/idxd.h>
 #include "registers.h"
@@ -51,6 +52,9 @@ enum idxd_type {
 #define IDXD_NAME_SIZE		128
 #define IDXD_PMU_EVENT_MAX	64
 
+#define IDXD_ENQCMDS_RETRIES		32
+#define IDXD_ENQCMDS_MAX_RETRIES	64
+
 struct idxd_device_driver {
 	const char *name;
 	enum idxd_dev_type *type;
@@ -64,8 +68,8 @@ extern struct idxd_device_driver idxd_drv;
 extern struct idxd_device_driver idxd_dmaengine_drv;
 extern struct idxd_device_driver idxd_user_drv;
 
+#define INVALID_INT_HANDLE	-1
 struct idxd_irq_entry {
-	struct idxd_device *idxd;
 	int id;
 	int vector;
 	struct llist_head pending_llist;
@@ -75,6 +79,8 @@ struct idxd_irq_entry {
 	 * and irq thread processing error descriptor.
 	 */
 	spinlock_t list_lock;
+	int int_handle;
+	ioasid_t pasid;
 };
 
 struct idxd_group {
@@ -84,9 +90,9 @@ struct idxd_group {
 	int id;
 	int num_engines;
 	int num_wqs;
-	bool use_token_limit;
-	u8 tokens_allowed;
-	u8 tokens_reserved;
+	bool use_rdbuf_limit;
+	u8 rdbufs_allowed;
+	u8 rdbufs_reserved;
 	int tc_a;
 	int tc_b;
 };
@@ -145,6 +151,10 @@ struct idxd_cdev {
 #define WQ_NAME_SIZE	1024
 #define WQ_TYPE_SIZE	10
 
+#define WQ_DEFAULT_QUEUE_DEPTH		16
+#define WQ_DEFAULT_MAX_XFER		SZ_2M
+#define WQ_DEFAULT_MAX_BATCH		32
+
 enum idxd_op_type {
 	IDXD_OP_BLOCK = 0,
 	IDXD_OP_NONBLOCK = 1,
@@ -164,13 +174,16 @@ struct idxd_dma_chan {
 struct idxd_wq {
 	void __iomem *portal;
 	u32 portal_offset;
+	unsigned int enqcmds_retries;
 	struct percpu_ref wq_active;
 	struct completion wq_dead;
+	struct completion wq_resurrect;
 	struct idxd_dev idxd_dev;
 	struct idxd_cdev *idxd_cdev;
 	struct wait_queue_head err_queue;
 	struct idxd_device *idxd;
 	int id;
+	struct idxd_irq_entry ie;
 	enum idxd_wq_type type;
 	struct idxd_group *group;
 	int client_count;
@@ -187,9 +200,7 @@ struct idxd_wq {
 		struct dsa_completion_record *compls;
 		struct iax_completion_record *iax_compls;
 	};
-	void *compls_raw;
 	dma_addr_t compls_addr;
-	dma_addr_t compls_addr_raw;
 	int compls_size;
 	struct idxd_desc **descs;
 	struct sbitmap_queue sbq;
@@ -253,6 +264,7 @@ struct idxd_device {
 	int id;
 	int major;
 	u32 cmd_status;
+	struct idxd_irq_entry ie;	/* misc irq, msix 0 */
 
 	struct pci_dev *pdev;
 	void __iomem *reg_base;
@@ -268,6 +280,8 @@ struct idxd_device {
 	unsigned int pasid;
 
 	int num_groups;
+	int irq_cnt;
+	bool request_int_handles;
 
 	u32 msix_perm_offset;
 	u32 wqcfg_offset;
@@ -278,24 +292,20 @@ struct idxd_device {
 	u32 max_batch_size;
 	int max_groups;
 	int max_engines;
-	int max_tokens;
+	int max_rdbufs;
 	int max_wqs;
 	int max_wq_size;
-	int token_limit;
-	int nr_tokens;		/* non-reserved tokens */
+	int rdbuf_limit;
+	int nr_rdbufs;		/* non-reserved read buffers */
 	unsigned int wqcfg_size;
 
 	union sw_err_reg sw_err;
 	wait_queue_head_t cmd_waitq;
-	int num_wq_irqs;
-	struct idxd_irq_entry *irq_entries;
 
 	struct idxd_dma_dev *idxd_dma;
 	struct workqueue_struct *wq;
 	struct work_struct work;
 
-	int *int_handles;
-
 	struct idxd_pmu *idxd_pmu;
 };
 
@@ -382,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
 	idev->type = type;
 }
 
+static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
+{
+	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
+}
+
+static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
+{
+	return container_of(ie, struct idxd_wq, ie);
+}
+
+static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
+{
+	return container_of(ie, struct idxd_device, ie);
+}
+
 extern struct bus_type dsa_bus_type;
 
 extern bool support_enqcmd;
@@ -520,17 +545,13 @@ void idxd_unregister_devices(struct idxd_device *idxd);
 int idxd_register_driver(void);
 void idxd_unregister_driver(void);
 void idxd_wqs_quiesce(struct idxd_device *idxd);
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
 
 /* device interrupt control */
-void idxd_msix_perm_setup(struct idxd_device *idxd);
-void idxd_msix_perm_clear(struct idxd_device *idxd);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
 void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
-void idxd_mask_msix_vectors(struct idxd_device *idxd);
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
 int idxd_register_idxd_drv(void);
@@ -566,13 +587,17 @@ int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
 int idxd_wq_disable_pasid(struct idxd_wq *wq);
+void __idxd_wq_quiesce(struct idxd_wq *wq);
 void idxd_wq_quiesce(struct idxd_wq *wq);
 int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
+void idxd_wq_free_irq(struct idxd_wq *wq);
+int idxd_wq_request_irq(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
 
 /* dmaengine */
 int idxd_register_dma_device(struct idxd_device *idxd);
@@ -581,7 +606,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq);
 void idxd_unregister_dma_channel(struct idxd_wq *wq);
 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type);
+			   enum idxd_complete_type comp_type, bool free_desc);
 
 /* cdev */
 int idxd_cdev_register(void);
@@ -605,10 +630,4 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-	idxd_dma_complete_txd(desc, reason);
-	idxd_free_desc(desc->wq, desc);
-}
-
 #endif
 
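
Note: the header changes above embed struct idxd_irq_entry directly in its owner (in the wq, or in the device for MSI-X vector 0), which is what lets ie_to_wq()/ie_to_idxd() recover the owner with container_of() instead of the back-pointer field that was removed from the irq entry. The idiom in isolation, as a generic sketch:

	#include <linux/kernel.h>	/* container_of() */

	struct irq_entry {
		int id;
	};

	struct owner {
		int state;
		struct irq_entry ie;	/* embedded, not a pointer */
	};

	static struct owner *entry_to_owner(struct irq_entry *ie)
	{
		/* Subtracts offsetof(struct owner, ie) from the address. */
		return container_of(ie, struct owner, ie);
	}
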
@@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
 	struct device *dev = &pdev->dev;
-	struct idxd_irq_entry *irq_entry;
+	struct idxd_irq_entry *ie;
 	int i, msixcnt;
 	int rc = 0;
 
@@ -81,6 +81,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 		dev_err(dev, "Not MSI-X interrupt capable.\n");
 		return -ENOSPC;
 	}
+	idxd->irq_cnt = msixcnt;
 
 	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
 	if (rc != msixcnt) {
@@ -89,87 +90,34 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	}
 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
 
-	/*
-	 * We implement 1 completion list per MSI-X entry except for
-	 * entry 0, which is for errors and others.
-	 */
-	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
-					 GFP_KERNEL, dev_to_node(dev));
-	if (!idxd->irq_entries) {
-		rc = -ENOMEM;
-		goto err_irq_entries;
-	}
-
-	for (i = 0; i < msixcnt; i++) {
-		idxd->irq_entries[i].id = i;
-		idxd->irq_entries[i].idxd = idxd;
-		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
-		spin_lock_init(&idxd->irq_entries[i].list_lock);
-	}
-
-	idxd_msix_perm_setup(idxd);
-
-	irq_entry = &idxd->irq_entries[0];
-	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
-				  0, "idxd-misc", irq_entry);
+	ie = idxd_get_ie(idxd, 0);
+	ie->vector = pci_irq_vector(pdev, 0);
+	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
 	if (rc < 0) {
 		dev_err(dev, "Failed to allocate misc interrupt.\n");
 		goto err_misc_irq;
 	}
+	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
 
-	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
+	for (i = 0; i < idxd->max_wqs; i++) {
+		int msix_idx = i + 1;
 
-	/* first MSI-X entry is not for wq interrupts */
-	idxd->num_wq_irqs = msixcnt - 1;
+		ie = idxd_get_ie(idxd, msix_idx);
+		ie->id = msix_idx;
+		ie->int_handle = INVALID_INT_HANDLE;
+		ie->pasid = INVALID_IOASID;
 
-	for (i = 1; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
-
-		init_llist_head(&idxd->irq_entries[i].pending_llist);
-		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
-		rc = request_threaded_irq(irq_entry->vector, NULL,
-					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
-		if (rc < 0) {
-			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
-			goto err_wq_irqs;
-		}
-
-		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
-		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
-			/*
-			 * The MSIX vector enumeration starts at 1 with vector 0 being the
-			 * misc interrupt that handles non I/O completion events. The
-			 * interrupt handles are for IMS enumeration on guest. The misc
-			 * interrupt vector does not require a handle and therefore we start
-			 * the int_handles at index 0. Since 'i' starts at 1, the first
-			 * int_handles index will be 0.
-			 */
-			rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
-							    IDXD_IRQ_MSIX);
-			if (rc < 0) {
-				free_irq(irq_entry->vector, irq_entry);
-				goto err_wq_irqs;
-			}
-			dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
-		}
+		spin_lock_init(&ie->list_lock);
+		init_llist_head(&ie->pending_llist);
+		INIT_LIST_HEAD(&ie->work_list);
 	}
 
 	idxd_unmask_error_interrupts(idxd);
 	return 0;
 
-err_wq_irqs:
-	while (--i >= 0) {
-		irq_entry = &idxd->irq_entries[i];
-		free_irq(irq_entry->vector, irq_entry);
-		if (i != 0)
-			idxd_device_release_int_handle(idxd,
-						       idxd->int_handles[i], IDXD_IRQ_MSIX);
-	}
 err_misc_irq:
-	/* Disable error interrupt generation */
 	idxd_mask_error_interrupts(idxd);
-	idxd_msix_perm_clear(idxd);
-err_irq_entries:
 	pci_free_irq_vectors(pdev);
 	dev_err(dev, "No usable interrupts\n");
 	return rc;
@@ -178,26 +126,16 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 static void idxd_cleanup_interrupts(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
-	struct idxd_irq_entry *irq_entry;
-	int i, msixcnt;
+	struct idxd_irq_entry *ie;
+	int msixcnt;
 
 	msixcnt = pci_msix_vec_count(pdev);
 	if (msixcnt <= 0)
 		return;
 
-	irq_entry = &idxd->irq_entries[0];
-	free_irq(irq_entry->vector, irq_entry);
-
-	for (i = 1; i < msixcnt; i++) {
-
-		irq_entry = &idxd->irq_entries[i];
-		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
-			idxd_device_release_int_handle(idxd, idxd->int_handles[i],
-						       IDXD_IRQ_MSIX);
-		free_irq(irq_entry->vector, irq_entry);
-	}
-
+	ie = idxd_get_ie(idxd, 0);
 	idxd_mask_error_interrupts(idxd);
+	free_irq(ie->vector, ie);
 	pci_free_irq_vectors(pdev);
 }
 
@@ -237,8 +175,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		mutex_init(&wq->wq_lock);
 		init_waitqueue_head(&wq->err_queue);
 		init_completion(&wq->wq_dead);
-		wq->max_xfer_bytes = idxd->max_xfer_bytes;
-		wq->max_batch_size = idxd->max_batch_size;
+		init_completion(&wq->wq_resurrect);
+		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
 		if (!wq->wqcfg) {
 			put_device(conf_dev);
@@ -379,13 +319,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 
 	init_waitqueue_head(&idxd->cmd_waitq);
 
-	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
-		idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
-						 dev_to_node(dev));
-		if (!idxd->int_handles)
-			return -ENOMEM;
-	}
-
 	rc = idxd_setup_wqs(idxd);
 	if (rc < 0)
 		goto err_wqs;
@@ -416,7 +349,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 	for (i = 0; i < idxd->max_wqs; i++)
 		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
-	kfree(idxd->int_handles);
 	return rc;
 }
 
@@ -451,6 +383,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
 		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
 	}
 
+	/* reading command capabilities */
+	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
+		idxd->request_int_handles = true;
+
 	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
 	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
 	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
@@ -464,9 +400,9 @@ static void idxd_read_caps(struct idxd_device *idxd)
 	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
 	idxd->max_groups = idxd->hw.group_cap.num_groups;
 	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
-	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
-	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
-	idxd->nr_tokens = idxd->max_tokens;
+	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
+	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
+	idxd->nr_rdbufs = idxd->max_rdbufs;
 
 	/* read engine capabilities */
 	idxd->hw.engine_cap.bits =
@@ -611,8 +547,6 @@ static int idxd_probe(struct idxd_device *idxd)
 	if (rc)
 		goto err_config;
 
-	dev_dbg(dev, "IDXD interrupt setup complete.\n");
-
 	idxd->major = idxd_cdev_get_major(idxd);
 
 	rc = perfmon_pmu_init(idxd);
@@ -708,32 +642,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return rc;
 }
 
-static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
-{
-	struct idxd_desc *desc, *itr;
-	struct llist_node *head;
-
-	head = llist_del_all(&ie->pending_llist);
-	if (!head)
-		return;
-
-	llist_for_each_entry_safe(desc, itr, head, llnode) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
-	}
-}
-
-static void idxd_flush_work_list(struct idxd_irq_entry *ie)
-{
-	struct idxd_desc *desc, *iter;
-
-	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
-		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
-	}
-}
-
 void idxd_wqs_quiesce(struct idxd_device *idxd)
 {
 	struct idxd_wq *wq;
@@ -746,47 +654,19 @@ void idxd_wqs_quiesce(struct idxd_device *idxd)
 	}
 }
 
-static void idxd_release_int_handles(struct idxd_device *idxd)
-{
-	struct device *dev = &idxd->pdev->dev;
-	int i, rc;
-
-	for (i = 0; i < idxd->num_wq_irqs; i++) {
-		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
-			rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
-							    IDXD_IRQ_MSIX);
-			if (rc < 0)
-				dev_warn(dev, "irq handle %d release failed\n",
-					 idxd->int_handles[i]);
-			else
-				dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]);
-		}
-	}
-}
-
 static void idxd_shutdown(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
-	int rc, i;
 	struct idxd_irq_entry *irq_entry;
-	int msixcnt = pci_msix_vec_count(pdev);
+	int rc;
 
 	rc = idxd_device_disable(idxd);
 	if (rc)
 		dev_err(&pdev->dev, "Disabling device failed\n");
 
-	dev_dbg(&pdev->dev, "%s called\n", __func__);
-	idxd_mask_msix_vectors(idxd);
+	irq_entry = &idxd->ie;
+	synchronize_irq(irq_entry->vector);
 	idxd_mask_error_interrupts(idxd);
-
-	for (i = 0; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
-		synchronize_irq(irq_entry->vector);
-		if (i == 0)
-			continue;
-		idxd_flush_pending_llist(irq_entry);
-		idxd_flush_work_list(irq_entry);
-	}
-
 	flush_workqueue(idxd->wq);
 }
 
@@ -794,8 +674,6 @@ static void idxd_remove(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
 	struct idxd_irq_entry *irq_entry;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
 
 	idxd_unregister_devices(idxd);
 	/*
@@ -811,12 +689,8 @@ static void idxd_remove(struct pci_dev *pdev)
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
 
-	for (i = 0; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
-		free_irq(irq_entry->vector, irq_entry);
-	}
-	idxd_msix_perm_clear(idxd);
-	idxd_release_int_handles(idxd);
+	irq_entry = idxd_get_ie(idxd, 0);
+	free_irq(irq_entry->vector, irq_entry);
 	pci_free_irq_vectors(pdev);
 	pci_iounmap(pdev, idxd->reg_base);
 	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
@@ -6,6 +6,7 @@
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/dmaengine.h>
+#include <linux/delay.h>
 #include <uapi/linux/idxd.h>
 #include "../dmaengine.h"
 #include "idxd.h"
@@ -22,6 +23,16 @@ struct idxd_fault {
 	struct idxd_device *idxd;
 };
 
+struct idxd_resubmit {
+	struct work_struct work;
+	struct idxd_desc *desc;
+};
+
+struct idxd_int_handle_revoke {
+	struct work_struct work;
+	struct idxd_device *idxd;
+};
+
 static void idxd_device_reinit(struct work_struct *work)
 {
 	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -55,6 +66,162 @@ static void idxd_device_reinit(struct work_struct *work)
 	idxd_device_clear_state(idxd);
 }
 
+/*
+ * The function sends a drain descriptor for the interrupt handle. The drain ensures
+ * all descriptors with this interrupt handle is flushed and the interrupt
+ * will allow the cleanup of the outstanding descriptors.
+ */
+static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
+{
+	struct idxd_wq *wq = ie_to_wq(ie);
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	struct dsa_hw_desc desc = {};
+	void __iomem *portal;
+	int rc;
+
+	/* Issue a simple drain operation with interrupt but no completion record */
+	desc.flags = IDXD_OP_FLAG_RCI;
+	desc.opcode = DSA_OPCODE_DRAIN;
+	desc.priv = 1;
+
+	if (ie->pasid != INVALID_IOASID)
+		desc.pasid = ie->pasid;
+	desc.int_handle = ie->int_handle;
+	portal = idxd_wq_portal_addr(wq);
+
+	/*
+	 * The wmb() makes sure that the descriptor is all there before we
+	 * issue.
+	 */
+	wmb();
+	if (wq_dedicated(wq)) {
+		iosubmit_cmds512(portal, &desc, 1);
+	} else {
+		rc = idxd_enqcmds(wq, portal, &desc);
+		/* This should not fail unless hardware failed. */
+		if (rc < 0)
+			dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);
+	}
+}
+
+static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
+{
+	LIST_HEAD(flist);
+	struct idxd_desc *d, *t;
+	struct llist_node *head;
+
+	spin_lock(&ie->list_lock);
+	head = llist_del_all(&ie->pending_llist);
+	if (head) {
+		llist_for_each_entry_safe(d, t, head, llnode)
+			list_add_tail(&d->list, &ie->work_list);
+	}
+
+	list_for_each_entry_safe(d, t, &ie->work_list, list) {
+		if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL)
+			list_move_tail(&d->list, &flist);
+	}
+	spin_unlock(&ie->list_lock);
+
+	list_for_each_entry_safe(d, t, &flist, list) {
+		list_del(&d->list);
+		idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+	}
+}
+
+static void idxd_int_handle_revoke(struct work_struct *work)
+{
+	struct idxd_int_handle_revoke *revoke =
+		container_of(work, struct idxd_int_handle_revoke, work);
+	struct idxd_device *idxd = revoke->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	struct device *dev = &pdev->dev;
+	int i, new_handle, rc;
+
+	if (!idxd->request_int_handles) {
+		kfree(revoke);
+		dev_warn(dev, "Unexpected int handle refresh interrupt.\n");
+		return;
+	}
+
+	/*
+	 * The loop attempts to acquire new interrupt handle for all interrupt
+	 * vectors that supports a handle. If a new interrupt handle is acquired and the
+	 * wq is kernel type, the driver will kill the percpu_ref to pause all
+	 * ongoing descriptor submissions. The interrupt handle is then changed.
+	 * After change, the percpu_ref is revived and all the pending submissions
+	 * are woken to try again. A drain is sent to for the interrupt handle
+	 * at the end to make sure all invalid int handle descriptors are processed.
+	 */
+	for (i = 1; i < idxd->irq_cnt; i++) {
+		struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
+		struct idxd_wq *wq = ie_to_wq(ie);
+
+		if (ie->int_handle == INVALID_INT_HANDLE)
+			continue;
+
+		rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
+		if (rc < 0) {
+			dev_warn(dev, "get int handle %d failed: %d\n", i, rc);
+			/*
+			 * Failed to acquire new interrupt handle. Kill the WQ
+			 * and release all the pending submitters. The submitters will
+			 * get error return code and handle appropriately.
+			 */
+			ie->int_handle = INVALID_INT_HANDLE;
+			idxd_wq_quiesce(wq);
+			idxd_abort_invalid_int_handle_descs(ie);
+			continue;
+		}
+
+		/* No change in interrupt handle, nothing needs to be done */
+		if (ie->int_handle == new_handle)
+			continue;
+
+		if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) {
+			/*
+			 * All the MSIX interrupts are allocated at once during probe.
+			 * Therefore we need to update all interrupts even if the WQ
+			 * isn't supporting interrupt operations.
+			 */
+			ie->int_handle = new_handle;
+			continue;
+		}
+
+		mutex_lock(&wq->wq_lock);
+		reinit_completion(&wq->wq_resurrect);
+
+		/* Kill percpu_ref to pause additional descriptor submissions */
+		percpu_ref_kill(&wq->wq_active);
+
+		/* Wait for all submitters quiesce before we change interrupt handle */
+		wait_for_completion(&wq->wq_dead);
+
+		ie->int_handle = new_handle;
+
+		/* Revive percpu ref and wake up all the waiting submitters */
+		percpu_ref_reinit(&wq->wq_active);
+		complete_all(&wq->wq_resurrect);
+		mutex_unlock(&wq->wq_lock);
+
+		/*
+		 * The delay here is to wait for all possible MOVDIR64B that
+		 * are issued before percpu_ref_kill() has happened to have
+		 * reached the PCIe domain before the drain is issued. The driver
+		 * needs to ensure that the drain descriptor issued does not pass
+		 * all the other issued descriptors that contain the invalid
+		 * interrupt handle in order to ensure that the drain descriptor
+		 * interrupt will allow the cleanup of all the descriptors with
+		 * invalid interrupt handle.
+		 */
+		if (wq_dedicated(wq))
+			udelay(100);
+		idxd_int_handle_revoke_drain(ie);
+	}
+	kfree(revoke);
+}
+
||||||
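The revoke loop above is an instance of a general quiesce/revive pattern: kill a reference gate so no new submitters enter, wait for in-flight submitters to drain, mutate the shared state, then revive the gate and wake everyone waiting to retry. A minimal userspace sketch of that pattern, assuming pthreads in place of percpu_ref and completions — all names here are illustrative, not kernel API:

#include <pthread.h>
#include <stdbool.h>

/* Fields assumed initialised: live = true, refs = 0, mutex/cond ready. */
struct submit_gate {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int refs;    /* in-flight submitters */
	bool live;   /* gate accepts new submitters */
};

/* Submitter side: block while the gate is dead, then take a reference. */
static void gate_enter(struct submit_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (!g->live)
		pthread_cond_wait(&g->cond, &g->lock);   /* wq_resurrect wait */
	g->refs++;
	pthread_mutex_unlock(&g->lock);
}

static void gate_exit(struct submit_gate *g)
{
	pthread_mutex_lock(&g->lock);
	if (--g->refs == 0)
		pthread_cond_broadcast(&g->cond);        /* last one out signals */
	pthread_mutex_unlock(&g->lock);
}

/* Revoke side: kill, drain, mutate, revive -- the shape of the loop above. */
static void gate_requiesce(struct submit_gate *g, void (*mutate)(void *), void *arg)
{
	pthread_mutex_lock(&g->lock);
	g->live = false;                   /* percpu_ref_kill() analogue */
	while (g->refs > 0)
		pthread_cond_wait(&g->cond, &g->lock);   /* wait_for_completion(wq_dead) */
	mutate(arg);                       /* e.g. swap the interrupt handle */
	g->live = true;                    /* percpu_ref_reinit() analogue */
	pthread_cond_broadcast(&g->cond);  /* complete_all(wq_resurrect) */
	pthread_mutex_unlock(&g->lock);
}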
 static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -101,6 +268,23 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 		err = true;
 	}
+
+	if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
+		struct idxd_int_handle_revoke *revoke;
+
+		val |= IDXD_INTC_INT_HANDLE_REVOKED;
+
+		revoke = kzalloc(sizeof(*revoke), GFP_ATOMIC);
+		if (revoke) {
+			revoke->idxd = idxd;
+			INIT_WORK(&revoke->work, idxd_int_handle_revoke);
+			queue_work(idxd->wq, &revoke->work);
+		} else {
+			dev_err(dev, "Failed to allocate work for int handle revoke\n");
+			idxd_wqs_quiesce(idxd);
+		}
+	}
+
 	if (cause & IDXD_INTC_CMD) {
 		val |= IDXD_INTC_CMD;
 		complete(idxd->cmd_done);
@@ -157,7 +341,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 irqreturn_t idxd_misc_thread(int vec, void *data)
 {
 	struct idxd_irq_entry *irq_entry = data;
-	struct idxd_device *idxd = irq_entry->idxd;
+	struct idxd_device *idxd = ie_to_idxd(irq_entry);
 	int rc;
 	u32 cause;

@@ -177,6 +361,51 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	return IRQ_HANDLED;
 }

+static void idxd_int_handle_resubmit_work(struct work_struct *work)
+{
+	struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work);
+	struct idxd_desc *desc = irw->desc;
+	struct idxd_wq *wq = desc->wq;
+	int rc;
+
+	desc->completion->status = 0;
+	rc = idxd_submit_desc(wq, desc);
+	if (rc < 0) {
+		dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n",
+			desc->id, wq->id);
+		/*
+		 * If the error is not -EAGAIN, it means the submission failed due to wq
+		 * has been killed instead of ENQCMDS failure. Here the driver needs to
+		 * notify the submitter of the failure by reporting abort status.
+		 *
+		 * -EAGAIN comes from ENQCMDS failure. idxd_submit_desc() will handle the
+		 * abort.
+		 */
+		if (rc != -EAGAIN) {
+			desc->completion->status = IDXD_COMP_DESC_ABORT;
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
+		}
+		idxd_free_desc(wq, desc);
+	}
+	kfree(irw);
+}
+
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc)
+{
+	struct idxd_wq *wq = desc->wq;
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_resubmit *irw;
+
+	irw = kzalloc(sizeof(*irw), GFP_KERNEL);
+	if (!irw)
+		return false;
+
+	irw->desc = desc;
+	INIT_WORK(&irw->work, idxd_int_handle_resubmit_work);
+	queue_work(idxd->wq, &irw->work);
+	return true;
+}
+
 static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 {
 	struct idxd_desc *desc, *t;
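idxd_queue_int_handle_resubmit() shows the usual interrupt-to-process-context hand-off: allocate a small heap carrier, stash the payload pointer, queue it, and let the worker free the carrier when done. A hedged userspace analogue, with a detached thread standing in for the kernel workqueue (names invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct resubmit_work {
	int desc_id;    /* payload; stands in for irw->desc */
};

/* The worker owns the carrier and frees it, mirroring kfree(irw) above. */
static void *resubmit_worker(void *arg)
{
	struct resubmit_work *w = arg;

	printf("resubmitting desc %d\n", w->desc_id);
	free(w);
	return NULL;
}

/* Returns true once the work is handed off; false on allocation failure. */
static bool queue_resubmit(int desc_id)
{
	struct resubmit_work *w = malloc(sizeof(*w));
	pthread_t t;

	if (!w)
		return false;
	w->desc_id = desc_id;
	if (pthread_create(&t, NULL, resubmit_worker, w) != 0) {
		free(w);
		return false;
	}
	pthread_detach(t);   /* fire and forget, like queue_work() */
	return true;
}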
@@ -195,11 +424,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 		 */
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-			complete_desc(desc, IDXD_COMPLETE_ABORT);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
 		}

-		complete_desc(desc, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 	} else {
 		spin_lock(&irq_entry->list_lock);
 		list_add_tail(&desc->list,
@@ -226,8 +455,7 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)

 	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
 		if (desc->completion->status) {
-			list_del(&desc->list);
-			list_add_tail(&desc->list, &flist);
+			list_move_tail(&desc->list, &flist);
 		}
 	}

@@ -239,11 +467,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 		 */
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-			complete_desc(desc, IDXD_COMPLETE_ABORT);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
 		}

-		complete_desc(desc, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 	}
 }

@@ -36,8 +36,7 @@ union gen_cap_reg {
 		u64 max_batch_shift:4;
 		u64 max_ims_mult:6;
 		u64 config_en:1;
-		u64 max_descs_per_engine:8;
-		u64 rsvd3:24;
+		u64 rsvd3:32;
 	};
 	u64 bits;
 } __packed;
@@ -65,9 +64,9 @@ union wq_cap_reg {
 union group_cap_reg {
 	struct {
 		u64 num_groups:8;
-		u64 total_tokens:8;
-		u64 token_en:1;
-		u64 token_limit:1;
+		u64 total_rdbufs:8;	/* formerly total_tokens */
+		u64 rdbuf_ctrl:1;	/* formerly token_en */
+		u64 rdbuf_limit:1;	/* formerly token_limit */
 		u64 rsvd:46;
 	};
 	u64 bits;
@@ -111,7 +110,7 @@ union offsets_reg {
 #define IDXD_GENCFG_OFFSET		0x80
 union gencfg_reg {
 	struct {
-		u32 token_limit:8;
+		u32 rdbuf_limit:8;
 		u32 rsvd:4;
 		u32 user_int_en:1;
 		u32 rsvd2:19;
@@ -159,6 +158,7 @@ enum idxd_device_reset_type {
 #define IDXD_INTC_OCCUPY		0x04
 #define IDXD_INTC_PERFMON_OVFL		0x08
 #define IDXD_INTC_HALT_STATE		0x10
+#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

 #define IDXD_CMD_OFFSET			0xa0
 union idxd_command_reg {
@@ -288,10 +288,10 @@ union group_flags {
 		u32 tc_a:3;
 		u32 tc_b:3;
 		u32 rsvd:1;
-		u32 use_token_limit:1;
-		u32 tokens_reserved:8;
+		u32 use_rdbuf_limit:1;
+		u32 rdbufs_reserved:8;
 		u32 rsvd2:4;
-		u32 tokens_allowed:8;
+		u32 rdbufs_allowed:8;
 		u32 rsvd3:4;
 	};
 	u32 bits;
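The register definitions above use the bitfield-union idiom: named fields overlay a raw bits word, so one register read can be decoded field by field or compared as a whole. A standalone sketch of the idiom — the field names mirror the renamed group_cap_reg, the raw value is made up, and like the kernel it relies on the compiler's (implementation-defined) bitfield layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout echoing group_cap_reg; assumes the compiler packs
 * bitfields low-to-high the way the kernel's supported ABIs do. */
union group_cap {
	struct {
		uint64_t num_groups:8;
		uint64_t total_rdbufs:8;
		uint64_t rdbuf_ctrl:1;
		uint64_t rdbuf_limit:1;
		uint64_t rsvd:46;
	};
	uint64_t bits;
};

int main(void)
{
	/* made-up raw value: 4 groups, 8 read buffers, both control bits set */
	union group_cap cap = { .bits = 0x30804 };

	printf("groups=%llu rdbufs=%llu ctrl=%llu limit=%llu\n",
	       (unsigned long long)cap.num_groups,
	       (unsigned long long)cap.total_rdbufs,
	       (unsigned long long)cap.rdbuf_ctrl,
	       (unsigned long long)cap.rdbuf_limit);
	return 0;
}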
@@ -21,15 +21,6 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
 	if (device_pasid_enabled(idxd))
 		desc->hw->pasid = idxd->pasid;

-	/*
-	 * On host, MSIX vecotr 0 is used for misc interrupt. Therefore when we match
-	 * vector 1:1 to the WQ id, we need to add 1
-	 */
-	if (!idxd->int_handles)
-		desc->hw->int_handle = wq->id + 1;
-	else
-		desc->hw->int_handle = idxd->int_handles[wq->id];
-
 	return desc;
 }

@@ -134,35 +125,58 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 	spin_unlock(&ie->list_lock);

 	if (found)
-		complete_desc(found, IDXD_COMPLETE_ABORT);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);

 	/*
-	 * complete_desc() will return desc to allocator and the desc can be
-	 * acquired by a different process and the desc->list can be modified.
-	 * Delete desc from list so the list trasversing does not get corrupted
-	 * by the other process.
+	 * completing the descriptor will return desc to allocator and
+	 * the desc can be acquired by a different process and the
+	 * desc->list can be modified. Delete desc from list so the
+	 * list trasversing does not get corrupted by the other process.
 	 */
 	list_for_each_entry_safe(d, t, &flist, list) {
 		list_del_init(&d->list);
-		complete_desc(d, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true);
 	}
 }

+/*
+ * ENQCMDS typically fail when the WQ is inactive or busy. On host submission, the driver
+ * has better control of number of descriptors being submitted to a shared wq by limiting
+ * the number of driver allocated descriptors to the wq size. However, when the swq is
+ * exported to a guest kernel, it may be shared with multiple guest kernels. This means
+ * the likelihood of getting busy returned on the swq when submitting goes significantly up.
+ * Having a tunable retry mechanism allows the driver to keep trying for a bit before giving
+ * up. The sysfs knob can be tuned by the system administrator.
+ */
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+{
+	int rc, retries = 0;
+
+	do {
+		rc = enqcmds(portal, desc);
+		if (rc == 0)
+			break;
+		cpu_relax();
+	} while (retries++ < wq->enqcmds_retries);
+
+	return rc;
+}
+
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct idxd_irq_entry *ie = NULL;
+	u32 desc_flags = desc->hw->flags;
 	void __iomem *portal;
 	int rc;

-	if (idxd->state != IDXD_DEV_ENABLED) {
-		idxd_free_desc(wq, desc);
+	if (idxd->state != IDXD_DEV_ENABLED)
 		return -EIO;
-	}

 	if (!percpu_ref_tryget_live(&wq->wq_active)) {
-		idxd_free_desc(wq, desc);
-		return -ENXIO;
+		wait_for_completion(&wq->wq_resurrect);
+		if (!percpu_ref_tryget_live(&wq->wq_active))
+			return -ENXIO;
 	}

 	portal = idxd_wq_portal_addr(wq);
@@ -178,28 +192,21 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	 * Pending the descriptor to the lockless list for the irq_entry
 	 * that we designated the descriptor to.
 	 */
-	if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
-		ie = &idxd->irq_entries[wq->id + 1];
+	if (desc_flags & IDXD_OP_FLAG_RCI) {
+		ie = &wq->ie;
+		desc->hw->int_handle = ie->int_handle;
 		llist_add(&desc->llnode, &ie->pending_llist);
 	}

 	if (wq_dedicated(wq)) {
 		iosubmit_cmds512(portal, desc->hw, 1);
 	} else {
-		/*
-		 * It's not likely that we would receive queue full rejection
-		 * since the descriptor allocation gates at wq size. If we
-		 * receive a -EAGAIN, that means something went wrong such as the
-		 * device is not accepting descriptor at all.
-		 */
-		rc = enqcmds(portal, desc->hw);
+		rc = idxd_enqcmds(wq, portal, desc->hw);
 		if (rc < 0) {
 			percpu_ref_put(&wq->wq_active);
 			/* abort operation frees the descriptor */
 			if (ie)
 				llist_abort_desc(wq, ie, desc);
-			else
-				idxd_free_desc(wq, desc);
 			return rc;
 		}
 	}

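idxd_enqcmds() is simply a bounded retry loop around a submission primitive that can transiently fail while the queue is busy. A generic sketch of the same shape, with submit_once() standing in for enqcmds() (assumed to return 0 on success and a negative value when busy):

/*
 * Bounded-retry submit in the spirit of idxd_enqcmds() above; any function
 * matching the submit_once() contract fits.
 */
static int submit_with_retries(int (*submit_once)(void *), void *desc,
			       unsigned int max_retries)
{
	unsigned int retries = 0;
	int rc;

	do {
		rc = submit_once(desc);
		if (rc == 0)
			break;
		/* the kernel inserts cpu_relax() here before retrying */
	} while (retries++ < max_retries);

	return rc;   /* still negative if every attempt found the queue busy */
}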
@@ -99,31 +99,39 @@ struct device_type idxd_engine_device_type = {

 /* Group attributes */

-static void idxd_set_free_tokens(struct idxd_device *idxd)
+static void idxd_set_free_rdbufs(struct idxd_device *idxd)
 {
-	int i, tokens;
+	int i, rdbufs;

-	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
 		struct idxd_group *g = idxd->groups[i];

-		tokens += g->tokens_reserved;
+		rdbufs += g->rdbufs_reserved;
 	}

-	idxd->nr_tokens = idxd->max_tokens - tokens;
+	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
+}
+
+static ssize_t group_read_buffers_reserved_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct idxd_group *group = confdev_to_group(dev);
+
+	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
 }

 static ssize_t group_tokens_reserved_show(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf)
 {
-	struct idxd_group *group = confdev_to_group(dev);
-
-	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
+	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+	return group_read_buffers_reserved_show(dev, attr, buf);
 }

-static ssize_t group_tokens_reserved_store(struct device *dev,
+static ssize_t group_read_buffers_reserved_store(struct device *dev,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
 	struct idxd_group *group = confdev_to_group(dev);
 	struct idxd_device *idxd = group->idxd;
@@ -143,33 +151,53 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
 	if (idxd->state == IDXD_DEV_ENABLED)
 		return -EPERM;

-	if (val > idxd->max_tokens)
+	if (val > idxd->max_rdbufs)
 		return -EINVAL;

-	if (val > idxd->nr_tokens + group->tokens_reserved)
+	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
 		return -EINVAL;

-	group->tokens_reserved = val;
-	idxd_set_free_tokens(idxd);
+	group->rdbufs_reserved = val;
+	idxd_set_free_rdbufs(idxd);
 	return count;
 }

+static ssize_t group_tokens_reserved_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+	return group_read_buffers_reserved_store(dev, attr, buf, count);
+}
+
 static struct device_attribute dev_attr_group_tokens_reserved =
 		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
 		       group_tokens_reserved_store);

+static struct device_attribute dev_attr_group_read_buffers_reserved =
+		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
+		       group_read_buffers_reserved_store);
+
+static ssize_t group_read_buffers_allowed_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct idxd_group *group = confdev_to_group(dev);
+
+	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
+}
+
 static ssize_t group_tokens_allowed_show(struct device *dev,
 					 struct device_attribute *attr,
 					 char *buf)
 {
-	struct idxd_group *group = confdev_to_group(dev);
-
-	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
+	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+	return group_read_buffers_allowed_show(dev, attr, buf);
 }

-static ssize_t group_tokens_allowed_store(struct device *dev,
+static ssize_t group_read_buffers_allowed_store(struct device *dev,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
 	struct idxd_group *group = confdev_to_group(dev);
 	struct idxd_device *idxd = group->idxd;
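Each renamed sysfs attribute keeps the old name alive as a thin shim: warn once, then forward to the new-name handler so the logic exists in exactly one place. A userspace sketch of this deprecation pattern, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct group_cfg {
	unsigned int rdbufs_reserved;
};

/* The new-name accessor holds the only copy of the logic. */
static unsigned int read_buffers_reserved_get(const struct group_cfg *g)
{
	return g->rdbufs_reserved;
}

/* The old name warns once, then forwards, like the sysfs shims above. */
static unsigned int tokens_reserved_get(const struct group_cfg *g)
{
	static bool warned;

	if (!warned) {
		fprintf(stderr, "tokens_reserved is deprecated, see read_buffers_reserved\n");
		warned = true;
	}
	return read_buffers_reserved_get(g);
}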
@@ -190,29 +218,49 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
 		return -EPERM;

 	if (val < 4 * group->num_engines ||
-	    val > group->tokens_reserved + idxd->nr_tokens)
+	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
 		return -EINVAL;

-	group->tokens_allowed = val;
+	group->rdbufs_allowed = val;
 	return count;
 }

+static ssize_t group_tokens_allowed_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+	return group_read_buffers_allowed_store(dev, attr, buf, count);
+}
+
 static struct device_attribute dev_attr_group_tokens_allowed =
 		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
 		       group_tokens_allowed_store);

+static struct device_attribute dev_attr_group_read_buffers_allowed =
+		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
+		       group_read_buffers_allowed_store);
+
+static ssize_t group_use_read_buffer_limit_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct idxd_group *group = confdev_to_group(dev);
+
+	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
+}
+
 static ssize_t group_use_token_limit_show(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf)
 {
-	struct idxd_group *group = confdev_to_group(dev);
-
-	return sysfs_emit(buf, "%u\n", group->use_token_limit);
+	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+	return group_use_read_buffer_limit_show(dev, attr, buf);
 }

-static ssize_t group_use_token_limit_store(struct device *dev,
+static ssize_t group_use_read_buffer_limit_store(struct device *dev,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
 	struct idxd_group *group = confdev_to_group(dev);
 	struct idxd_device *idxd = group->idxd;
@@ -232,17 +280,29 @@ static ssize_t group_use_token_limit_store(struct device *dev,
 	if (idxd->state == IDXD_DEV_ENABLED)
 		return -EPERM;

-	if (idxd->token_limit == 0)
+	if (idxd->rdbuf_limit == 0)
 		return -EPERM;

-	group->use_token_limit = !!val;
+	group->use_rdbuf_limit = !!val;
 	return count;
 }

+static ssize_t group_use_token_limit_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+	return group_use_read_buffer_limit_store(dev, attr, buf, count);
+}
+
 static struct device_attribute dev_attr_group_use_token_limit =
 		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
 		       group_use_token_limit_store);

+static struct device_attribute dev_attr_group_use_read_buffer_limit =
+		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
+		       group_use_read_buffer_limit_store);
+
 static ssize_t group_engines_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
@@ -387,8 +447,11 @@ static struct attribute *idxd_group_attributes[] = {
 	&dev_attr_group_work_queues.attr,
 	&dev_attr_group_engines.attr,
 	&dev_attr_group_use_token_limit.attr,
+	&dev_attr_group_use_read_buffer_limit.attr,
 	&dev_attr_group_tokens_allowed.attr,
+	&dev_attr_group_read_buffers_allowed.attr,
 	&dev_attr_group_tokens_reserved.attr,
+	&dev_attr_group_read_buffers_reserved.attr,
 	&dev_attr_group_traffic_class_a.attr,
 	&dev_attr_group_traffic_class_b.attr,
 	NULL,
@@ -945,6 +1008,41 @@ static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *at
 static struct device_attribute dev_attr_wq_occupancy =
 		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

+static ssize_t wq_enqcmds_retries_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct idxd_wq *wq = confdev_to_wq(dev);
+
+	if (wq_dedicated(wq))
+		return -EOPNOTSUPP;
+
+	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
+}
+
+static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct idxd_wq *wq = confdev_to_wq(dev);
+	int rc;
+	unsigned int retries;
+
+	if (wq_dedicated(wq))
+		return -EOPNOTSUPP;
+
+	rc = kstrtouint(buf, 10, &retries);
+	if (rc < 0)
+		return rc;
+
+	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
+		retries = IDXD_ENQCMDS_MAX_RETRIES;
+
+	wq->enqcmds_retries = retries;
+	return count;
+}
+
+static struct device_attribute dev_attr_wq_enqcmds_retries =
+		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+
 static struct attribute *idxd_wq_attributes[] = {
 	&dev_attr_wq_clients.attr,
 	&dev_attr_wq_state.attr,
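wq_enqcmds_retries_store() parses a decimal count and clamps oversized values instead of rejecting them. A hedged userspace equivalent using strtoul where the kernel uses kstrtouint; MAX_RETRIES is a stand-in, since the real IDXD_ENQCMDS_MAX_RETRIES value does not appear in this diff:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#define MAX_RETRIES 32U   /* stand-in for IDXD_ENQCMDS_MAX_RETRIES */

/* Parse a decimal retry count and clamp it rather than rejecting it. */
static int parse_retries(const char *buf, unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 10);
	if (errno || end == buf || val > UINT_MAX)
		return -EINVAL;

	if (val > MAX_RETRIES)
		val = MAX_RETRIES;
	*out = (unsigned int)val;
	return 0;
}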
@@ -961,6 +1059,7 @@ static struct attribute *idxd_wq_attributes[] = {
 	&dev_attr_wq_max_batch_size.attr,
 	&dev_attr_wq_ats_disable.attr,
 	&dev_attr_wq_occupancy.attr,
+	&dev_attr_wq_enqcmds_retries.attr,
 	NULL,
 };

@@ -1156,26 +1255,42 @@ static ssize_t errors_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(errors);

-static ssize_t max_tokens_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t max_read_buffers_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);

-	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
+	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
+}
+
+static ssize_t max_tokens_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
+	return max_read_buffers_show(dev, attr, buf);
+}
+
+static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
+static DEVICE_ATTR_RO(max_read_buffers);
+
+static ssize_t read_buffer_limit_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct idxd_device *idxd = confdev_to_idxd(dev);
+
+	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
 }
-static DEVICE_ATTR_RO(max_tokens);

 static ssize_t token_limit_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct idxd_device *idxd = confdev_to_idxd(dev);
-
-	return sysfs_emit(buf, "%u\n", idxd->token_limit);
+	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
+	return read_buffer_limit_show(dev, attr, buf);
 }

-static ssize_t token_limit_store(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
+static ssize_t read_buffer_limit_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);
 	unsigned long val;
@@ -1191,16 +1306,26 @@ static ssize_t token_limit_store(struct device *dev,
 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
 		return -EPERM;

-	if (!idxd->hw.group_cap.token_limit)
+	if (!idxd->hw.group_cap.rdbuf_limit)
 		return -EPERM;

-	if (val > idxd->hw.group_cap.total_tokens)
+	if (val > idxd->hw.group_cap.total_rdbufs)
 		return -EINVAL;

-	idxd->token_limit = val;
+	idxd->rdbuf_limit = val;
 	return count;
 }
-static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t token_limit_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
+	return read_buffer_limit_store(dev, attr, buf, count);
+}
+
+static DEVICE_ATTR_RW(token_limit);	/* deprecated */
+static DEVICE_ATTR_RW(read_buffer_limit);

 static ssize_t cdev_major_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
@@ -1246,7 +1371,9 @@ static struct attribute *idxd_device_attributes[] = {
 	&dev_attr_state.attr,
 	&dev_attr_errors.attr,
 	&dev_attr_max_tokens.attr,
+	&dev_attr_max_read_buffers.attr,
 	&dev_attr_token_limit.attr,
+	&dev_attr_read_buffer_limit.attr,
 	&dev_attr_cdev_major.attr,
 	&dev_attr_cmd_status.attr,
 	NULL,
@@ -1268,8 +1395,6 @@ static void idxd_conf_device_release(struct device *dev)
 	kfree(idxd->groups);
 	kfree(idxd->wqs);
 	kfree(idxd->engines);
-	kfree(idxd->irq_entries);
-	kfree(idxd->int_handles);
 	ida_free(&idxd_ida, idxd->id);
 	kfree(idxd);
 }

@@ -741,9 +741,8 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	unsigned long flags;

 	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
-	if (!buf_virt) {
+	if (!buf_virt)
 		return -ENOMEM;
-	}

 	spin_lock_irqsave(&sdma->channel_0_lock, flags);

@@ -1227,8 +1226,9 @@ static int sdma_config_channel(struct dma_chan *chan)
 		if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
 		    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
 			sdma_set_watermarklevel_for_p2p(sdmac);
-	} else
+	} else {
 		__set_bit(sdmac->event_id0, sdmac->event_mask);
+	}

 	/* Address */
 	sdmac->shp_addr = sdmac->per_address;
@@ -1241,7 +1241,7 @@ static int sdma_config_channel(struct dma_chan *chan)
 }

 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 				     unsigned int priority)
 {
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
@@ -1261,7 +1261,7 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 	int ret = -EBUSY;

 	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
 				       GFP_NOWAIT);
 	if (!sdma->bd0) {
 		ret = -ENOMEM;
 		goto out;
@@ -1284,7 +1284,7 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
 	int ret = 0;

 	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
 				      &desc->bd_phys, GFP_NOWAIT);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
@@ -1757,7 +1757,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46

 static void sdma_add_scripts(struct sdma_engine *sdma,
 			     const struct sdma_script_start_addrs *addr)
 {
 	s32 *addr_arr = (u32 *)addr;
 	s32 *saddr_arr = (u32 *)sdma->script_addrs;
@@ -1840,8 +1840,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	clk_enable(sdma->clk_ahb);
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			 header->ram_code_size,
 			 addr->ram_code_start_addr);
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);

@@ -1850,8 +1850,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	sdma->fw_loaded = true;

 	dev_info(sdma->dev, "loaded firmware %d.%d\n",
 		 header->version_major,
 		 header->version_minor);

 err_firmware:
 	release_firmware(fw);
@@ -1955,7 +1955,7 @@ static int sdma_init(struct sdma_engine *sdma)
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

 	sdma->channel_control = dma_alloc_coherent(sdma->dev,
-			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
 			sizeof(struct sdma_context_data),
 			&ccb_phys, GFP_KERNEL);

@@ -1965,9 +1965,9 @@ static int sdma_init(struct sdma_engine *sdma)
 	}

 	sdma->context = (void *)sdma->channel_control +
-		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
 	sdma->context_phys = ccb_phys +
-		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

 	/* disable all channels */
 	for (i = 0; i < sdma->drvdata->num_events; i++)

@@ -1363,15 +1363,9 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!iomap)
 		return -ENOMEM;

-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (err)
 		return err;

@@ -158,8 +158,9 @@ static struct attribute *ioat_attrs[] = {
 	&intr_coalesce_attr.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(ioat);

 struct kobj_type ioat_ktype = {
 	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat_attrs,
+	.default_groups = ioat_groups,
 };

@@ -269,7 +269,7 @@ milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (!md)
 		return NULL;

-	md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT);
+	md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
 	if (!md->sgl) {
 		kfree(md);
 		return NULL;

@@ -1117,6 +1117,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 					 mmp_pdma_dma_xlate, pdev);
 		if (ret < 0) {
 			dev_err(&op->dev, "of_dma_controller_register failed\n");
+			dma_async_device_unregister(&pdev->device);
 			return ret;
 		}
 	}

@@ -149,7 +149,7 @@ struct mv_xor_v2_descriptor {
  * @desc_size: HW descriptor size
  * @npendings: number of pending descriptors (for which tx_submit has
  * @hw_queue_idx: HW queue index
- * @msi_desc: local interrupt descriptor information
+ * @irq: The Linux interrupt number
  * been called, but not yet issue_pending)
  */
 struct mv_xor_v2_device {
@@ -168,7 +168,7 @@ struct mv_xor_v2_device {
 	int desc_size;
 	unsigned int npendings;
 	unsigned int hw_queue_idx;
-	struct msi_desc *msi_desc;
+	unsigned int irq;
 };

 /**
@@ -718,7 +718,6 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	int i, ret = 0;
 	struct dma_device *dma_dev;
 	struct mv_xor_v2_sw_desc *sw_desc;
-	struct msi_desc *msi_desc;

 	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
 		     MV_XOR_V2_EXT_DESC_SIZE);
@@ -770,14 +769,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	if (ret)
 		goto disable_clk;

-	msi_desc = first_msi_entry(&pdev->dev);
-	if (!msi_desc) {
-		ret = -ENODEV;
-		goto free_msi_irqs;
-	}
-	xor_dev->msi_desc = msi_desc;
+	xor_dev->irq = msi_get_virq(&pdev->dev, 0);

-	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+	ret = devm_request_irq(&pdev->dev, xor_dev->irq,
 			       mv_xor_v2_interrupt_handler, 0,
 			       dev_name(&pdev->dev), xor_dev);
 	if (ret)
@@ -892,7 +886,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
 			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

-	devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+	devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);

 	platform_msi_domain_free_irqs(&pdev->dev);

@@ -835,7 +835,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 		goto err_disable_pdev;
 	}

-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (err) {
 		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
 		goto err_free_res;

@@ -563,15 +563,9 @@ static int plx_dma_probe(struct pci_dev *pdev,
 	if (rc)
 		return rc;

-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
 	if (rc)
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (rc)
-		return rc;
-
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
-	if (rc)
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;

@@ -3240,7 +3240,6 @@ static int ppc440spe_adma_dma2rxor_prep_src(
 		struct ppc440spe_rxor *cursor, int index,
 		int src_cnt, u32 addr)
 {
-	int rval = 0;
 	u32 sign;
 	struct ppc440spe_adma_desc_slot *desc = hdesc;
 	int i;
@@ -3348,7 +3347,7 @@ static int ppc440spe_adma_dma2rxor_prep_src(
 		break;
 	}

-	return rval;
+	return 0;
 }

 /**

@@ -742,8 +742,7 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
 	dma_addr_t dma;
 	int i;

-	sw_desc = kzalloc(sizeof(*sw_desc) +
-			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
+	sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
 			  GFP_NOWAIT);
 	if (!sw_desc)
 		return NULL;
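The pxad change replaces an open-coded header-plus-array size computation with struct_size(), which cannot silently overflow. A minimal userspace sketch of the same discipline for a flexible array member; the types are invented, and the explicit bound check plays the role of struct_size()'s overflow handling:

#include <stdint.h>
#include <stdlib.h>

struct hw_desc { uint32_t cfg; };

struct sw_desc {
	size_t nb;
	struct hw_desc *hw[];   /* flexible array member */
};

/* One zeroed allocation for header plus trailing array, overflow-checked. */
static struct sw_desc *alloc_sw_desc(size_t nb_hw_desc)
{
	struct sw_desc *sw;

	if (nb_hw_desc > (SIZE_MAX - sizeof(*sw)) / sizeof(sw->hw[0]))
		return NULL;

	sw = calloc(1, sizeof(*sw) + nb_hw_desc * sizeof(sw->hw[0]));
	if (sw)
		sw->nb = nb_hw_desc;
	return sw;
}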
@@ -388,6 +388,8 @@ struct bam_device {
 	/* execution environment ID, from DT */
 	u32 ee;
 	bool controlled_remotely;
+	bool powered_remotely;
+	u32 active_channels;

 	const struct reg_offset_data *layout;

@@ -415,6 +417,44 @@ static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
 		r.ee_mult * bdev->ee;
 }

+/**
+ * bam_reset() - reset and initialize BAM registers
+ * @bdev: bam device
+ */
+static void bam_reset(struct bam_device *bdev)
+{
+	u32 val;
+
+	/* s/w reset bam */
+	/* after reset all pipes are disabled and idle */
+	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
+	val |= BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+	val &= ~BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* make sure previous stores are visible before enabling BAM */
+	wmb();
+
+	/* enable bam */
+	val |= BAM_EN;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* set descriptor threshhold, start with 4 bytes */
+	writel_relaxed(DEFAULT_CNT_THRSHLD,
+		       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+
+	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
+	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
+
+	/* enable irqs for errors */
+	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+		       bam_addr(bdev, 0, BAM_IRQ_EN));
+
+	/* unmask global bam interrupt */
+	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+}
+
 /**
  * bam_reset_channel - Reset individual BAM DMA channel
  * @bchan: bam channel
@@ -512,6 +552,9 @@ static int bam_alloc_chan(struct dma_chan *chan)
 		return -ENOMEM;
 	}

+	if (bdev->active_channels++ == 0 && bdev->powered_remotely)
+		bam_reset(bdev);
+
 	return 0;
 }

@@ -565,6 +608,13 @@ static void bam_free_chan(struct dma_chan *chan)
 	/* disable irq */
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

+	if (--bdev->active_channels == 0 && bdev->powered_remotely) {
+		/* s/w reset bam */
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
+		val |= BAM_SW_RST;
+		writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+	}
+
 err:
 	pm_runtime_mark_last_busy(bdev->dev);
 	pm_runtime_put_autosuspend(bdev->dev);
@@ -1164,37 +1214,9 @@ static int bam_init(struct bam_device *bdev)
 		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
 	}

-	if (bdev->controlled_remotely)
-		return 0;
-
-	/* s/w reset bam */
-	/* after reset all pipes are disabled and idle */
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
-	val |= BAM_SW_RST;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-	val &= ~BAM_SW_RST;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-
-	/* make sure previous stores are visible before enabling BAM */
-	wmb();
-
-	/* enable bam */
-	val |= BAM_EN;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-
-	/* set descriptor threshhold, start with 4 bytes */
-	writel_relaxed(DEFAULT_CNT_THRSHLD,
-		       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
-
-	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
-	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
-
-	/* enable irqs for errors */
-	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
-		       bam_addr(bdev, 0, BAM_IRQ_EN));
-
-	/* unmask global bam interrupt */
-	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+	/* Reset BAM now if fully controlled locally */
+	if (!bdev->controlled_remotely && !bdev->powered_remotely)
+		bam_reset(bdev);

 	return 0;
 }
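bam_alloc_chan() and bam_free_chan() now bracket a remotely-powered BAM with a first-user/last-user count: the first opener resets and initializes the block, the last closer puts it back into reset. A plain C sketch of the counting pattern (single-threaded for brevity; the driver serializes these paths itself):

#include <stdbool.h>

struct block {
	unsigned int active_channels;
	bool powered_remotely;
};

static void block_reset(struct block *b) { (void)b; /* program init registers */ }
static void block_halt(struct block *b)  { (void)b; /* assert s/w reset */ }

/* First opener initialises the remotely-powered block... */
static void channel_open(struct block *b)
{
	if (b->active_channels++ == 0 && b->powered_remotely)
		block_reset(b);
}

/* ...and the last closer puts it back into reset. */
static void channel_close(struct block *b)
{
	if (--b->active_channels == 0 && b->powered_remotely)
		block_halt(b);
}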
@@ -1257,8 +1279,10 @@ static int bam_dma_probe(struct platform_device *pdev)

 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
 						"qcom,controlled-remotely");
+	bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node,
+						"qcom,powered-remotely");

-	if (bdev->controlled_remotely) {
+	if (bdev->controlled_remotely || bdev->powered_remotely) {
 		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
 					   &bdev->num_channels);
 		if (ret)
@@ -1270,7 +1294,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 			dev_err(bdev->dev, "num-ees unspecified in dt\n");
 	}

-	if (bdev->controlled_remotely)
+	if (bdev->controlled_remotely || bdev->powered_remotely)
 		bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
 	else
 		bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");

@@ -2206,10 +2206,8 @@ static int gpi_probe(struct platform_device *pdev)

 		/* set up irq */
 		ret = platform_get_irq(pdev, i);
-		if (ret < 0) {
-			dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret);
+		if (ret < 0)
 			return ret;
-		}
 		gpii->irq = ret;

 		/* set up channel specific register info */

@@ -666,7 +666,7 @@ static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 	struct device *dev = msi_desc_to_dev(desc);
 	struct hidma_dev *dmadev = dev_get_drvdata(dev);

-	if (!desc->platform.msi_index) {
+	if (!desc->msi_index) {
 		writel(msg->address_lo, dmadev->dev_evca + 0x118);
 		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
 		writel(msg->data, dmadev->dev_evca + 0x120);
@@ -678,11 +678,13 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
 {
 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
 	struct device *dev = dmadev->ddev.dev;
-	struct msi_desc *desc;
+	int i, virq;

-	/* free allocated MSI interrupts above */
-	for_each_msi_entry(desc, dev)
-		devm_free_irq(dev, desc->irq, &dmadev->lldev);
+	for (i = 0; i < HIDMA_MSI_INTS; i++) {
+		virq = msi_get_virq(dev, i);
+		if (virq)
+			devm_free_irq(dev, virq, &dmadev->lldev);
+	}

 	platform_msi_domain_free_irqs(dev);
 #endif
@@ -692,45 +694,37 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
 			     struct platform_device *pdev)
 {
 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
-	int rc;
-	struct msi_desc *desc;
-	struct msi_desc *failed_desc = NULL;
+	int rc, i, virq;

 	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
 					    hidma_write_msi_msg);
 	if (rc)
 		return rc;

-	for_each_msi_entry(desc, &pdev->dev) {
-		if (!desc->platform.msi_index)
-			dmadev->msi_virqbase = desc->irq;
-
-		rc = devm_request_irq(&pdev->dev, desc->irq,
+	for (i = 0; i < HIDMA_MSI_INTS; i++) {
+		virq = msi_get_virq(&pdev->dev, i);
+		rc = devm_request_irq(&pdev->dev, virq,
 				      hidma_chirq_handler_msi,
 				      0, "qcom-hidma-msi",
 				      &dmadev->lldev);
-		if (rc) {
-			failed_desc = desc;
+		if (rc)
 			break;
-		}
+		if (!i)
+			dmadev->msi_virqbase = virq;
 	}

 	if (rc) {
 		/* free allocated MSI interrupts above */
-		for_each_msi_entry(desc, &pdev->dev) {
-			if (desc == failed_desc)
-				break;
-			devm_free_irq(&pdev->dev, desc->irq,
-				      &dmadev->lldev);
+		for (--i; i >= 0; i--) {
+			virq = msi_get_virq(&pdev->dev, i);
+			devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
 		}
+		dev_warn(&pdev->dev,
+			 "failed to request MSI irq, falling back to wired IRQ\n");
 	} else {
 		/* Add callback to free MSIs on teardown */
 		hidma_ll_setup_irq(dmadev->lldev, true);
 	}
-	if (rc)
-		dev_warn(&pdev->dev,
-			 "failed to request MSI irq, falling back to wired IRQ\n");
 	return rc;
 #else
 	return -EINVAL;
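The reworked hidma_request_msi() uses the classic acquire-by-index loop with reverse unwind: if acquisition fails at index i, exactly the i resources already obtained are released, newest first. A self-contained sketch of that shape, with request_one()/release_one() as stand-ins:

#define NR_RESOURCES 11   /* stands in for HIDMA_MSI_INTS */

static int  request_one(int i) { return i < NR_RESOURCES ? 0 : -1; }
static void release_one(int i) { (void)i; }

/* Acquire everything by index; on failure, release exactly what was taken. */
static int request_all(void)
{
	int i, rc = 0;

	for (i = 0; i < NR_RESOURCES; i++) {
		rc = request_one(i);
		if (rc)
			break;
	}

	if (rc) {
		/* i failed, so entries 0..i-1 are held: walk back, newest first */
		for (--i; i >= 0; i--)
			release_one(i);
	}
	return rc;
}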
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -140,6 +141,8 @@ struct adm_chan {

 	struct adm_async_desc *curr_txd;
 	struct dma_slave_config slave;
+	u32 crci;
+	u32 mux;
 	struct list_head node;

 	int error;
@@ -379,8 +382,8 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
 		return ERR_PTR(-EINVAL);
 	}

-	crci = achan->slave.slave_id & 0xf;
-	if (!crci || achan->slave.slave_id > 0x1f) {
+	crci = achan->crci & 0xf;
+	if (!crci || achan->crci > 0x1f) {
 		dev_err(adev->dev, "invalid crci value\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -403,9 +406,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
 	if (!async_desc)
 		return ERR_PTR(-ENOMEM);

-	if (crci)
-		async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
-					ADM_CRCI_CTL_MUX_SEL : 0;
+	async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
 	async_desc->crci = crci;
 	async_desc->blk_size = blk_size;
 	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
@@ -488,10 +489,13 @@ static int adm_terminate_all(struct dma_chan *chan)
 static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
 {
 	struct adm_chan *achan = to_adm_chan(chan);
+	struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
 	unsigned long flag;

 	spin_lock_irqsave(&achan->vc.lock, flag);
 	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+	if (cfg->peripheral_size == sizeof(config))
+		achan->crci = config->crci;
 	spin_unlock_irqrestore(&achan->vc.lock, flag);

 	return 0;
@ -694,6 +698,45 @@ static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
|
|||||||
achan->vc.desc_free = adm_dma_free_desc;
|
achan->vc.desc_free = adm_dma_free_desc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* adm_dma_xlate
|
||||||
|
* @dma_spec: pointer to DMA specifier as found in the device tree
|
||||||
|
* @ofdma: pointer to DMA controller data
|
||||||
|
*
|
||||||
|
* This can use either 1-cell or 2-cell formats, the first cell
|
||||||
|
* identifies the slave device, while the optional second cell
|
||||||
|
* contains the crci value.
|
||||||
|
*
|
||||||
|
* Returns pointer to appropriate dma channel on success or NULL on error.
|
||||||
|
*/
|
||||||
|
static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
|
||||||
|
struct of_dma *ofdma)
|
||||||
|
{
|
||||||
|
struct dma_device *dev = ofdma->of_dma_data;
|
||||||
|
struct dma_chan *chan, *candidate = NULL;
|
||||||
|
struct adm_chan *achan;
|
||||||
|
|
||||||
|
if (!dev || dma_spec->args_count > 2)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
list_for_each_entry(chan, &dev->channels, device_node)
|
||||||
|
if (chan->chan_id == dma_spec->args[0]) {
|
||||||
|
candidate = chan;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!candidate)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
achan = to_adm_chan(candidate);
|
||||||
|
if (dma_spec->args_count == 2)
|
||||||
|
achan->crci = dma_spec->args[1];
|
||||||
|
else
|
||||||
|
achan->crci = 0;
|
||||||
|
|
||||||
|
return dma_get_slave_channel(candidate);
|
||||||
|
}
|
||||||
|
|
||||||
static int adm_dma_probe(struct platform_device *pdev)
|
static int adm_dma_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct adm_device *adev;
|
struct adm_device *adev;
|
||||||
@ -838,8 +881,7 @@ static int adm_dma_probe(struct platform_device *pdev)
|
|||||||
goto err_disable_clks;
|
goto err_disable_clks;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = of_dma_controller_register(pdev->dev.of_node,
|
ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
|
||||||
of_dma_xlate_by_chan_id,
|
|
||||||
&adev->common);
|
&adev->common);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err_unregister_dma;
|
goto err_unregister_dma;
|
||||||
|
@ -1001,7 +1001,7 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sa11x0_dma_suspend(struct device *dev)
|
static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
|
||||||
{
|
{
|
||||||
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
||||||
unsigned pch;
|
unsigned pch;
|
||||||
@ -1039,7 +1039,7 @@ static int sa11x0_dma_suspend(struct device *dev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sa11x0_dma_resume(struct device *dev)
|
static __maybe_unused int sa11x0_dma_resume(struct device *dev)
|
||||||
{
|
{
|
||||||
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
|
||||||
unsigned pch;
|
unsigned pch;
|
||||||
@ -1072,12 +1072,7 @@ static int sa11x0_dma_resume(struct device *dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
|
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
|
||||||
.suspend_noirq = sa11x0_dma_suspend,
|
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume)
|
||||||
.resume_noirq = sa11x0_dma_resume,
|
|
||||||
.freeze_noirq = sa11x0_dma_suspend,
|
|
||||||
.thaw_noirq = sa11x0_dma_resume,
|
|
||||||
.poweroff_noirq = sa11x0_dma_suspend,
|
|
||||||
.restore_noirq = sa11x0_dma_resume,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct platform_driver sa11x0_dma_driver = {
|
static struct platform_driver sa11x0_dma_driver = {
|
||||||
|
@ -236,7 +236,7 @@ struct rcar_dmac_of_data {
|
|||||||
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
|
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
|
||||||
#define RCAR_DMAOR_AE (1 << 2)
|
#define RCAR_DMAOR_AE (1 << 2)
|
||||||
#define RCAR_DMAOR_DME (1 << 0)
|
#define RCAR_DMAOR_DME (1 << 0)
|
||||||
#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */
|
#define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */
|
||||||
#define RCAR_DMADPSEC 0x00a0
|
#define RCAR_DMADPSEC 0x00a0
|
||||||
|
|
||||||
#define RCAR_DMASAR 0x0000
|
#define RCAR_DMASAR 0x0000
|
||||||
@ -299,8 +299,8 @@ struct rcar_dmac_of_data {
|
|||||||
#define RCAR_DMAFIXDAR 0x0014
|
#define RCAR_DMAFIXDAR 0x0014
|
||||||
#define RCAR_DMAFIXDPBASE 0x0060
|
#define RCAR_DMAFIXDPBASE 0x0060
|
||||||
|
|
||||||
/* For R-Car V3U */
|
/* For R-Car Gen4 */
|
||||||
#define RCAR_V3U_DMACHCLR 0x0100
|
#define RCAR_GEN4_DMACHCLR 0x0100
|
||||||
|
|
||||||
/* Hardcode the MEMCPY transfer size to 4 bytes. */
|
/* Hardcode the MEMCPY transfer size to 4 bytes. */
|
||||||
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
|
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
|
||||||
@ -345,7 +345,7 @@ static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
|
|||||||
struct rcar_dmac_chan *chan)
|
struct rcar_dmac_chan *chan)
|
||||||
{
|
{
|
||||||
if (dmac->chan_base)
|
if (dmac->chan_base)
|
||||||
rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
|
rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
|
||||||
else
|
else
|
||||||
rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
|
rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
|
||||||
}
|
}
|
||||||
@ -357,7 +357,7 @@ static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
|
|||||||
|
|
||||||
if (dmac->chan_base) {
|
if (dmac->chan_base) {
|
||||||
for_each_rcar_dmac_chan(i, dmac, chan)
|
for_each_rcar_dmac_chan(i, dmac, chan)
|
||||||
rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
|
rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
|
||||||
} else {
|
} else {
|
||||||
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
|
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
|
||||||
}
|
}
|
||||||
@ -1921,7 +1921,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
|
|||||||
ret = pm_runtime_resume_and_get(&pdev->dev);
|
ret = pm_runtime_resume_and_get(&pdev->dev);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
|
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
|
||||||
return ret;
|
goto err_pm_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = rcar_dmac_init(dmac);
|
ret = rcar_dmac_init(dmac);
|
||||||
@ -1929,7 +1929,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(&pdev->dev, "failed to reset device\n");
|
dev_err(&pdev->dev, "failed to reset device\n");
|
||||||
goto error;
|
goto err_pm_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Initialize engine */
|
/* Initialize engine */
|
||||||
@ -1963,14 +1963,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
|
|||||||
for_each_rcar_dmac_chan(i, dmac, chan) {
|
for_each_rcar_dmac_chan(i, dmac, chan) {
|
||||||
ret = rcar_dmac_chan_probe(dmac, chan);
|
ret = rcar_dmac_chan_probe(dmac, chan);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto error;
|
goto err_pm_disable;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Register the DMAC as a DMA provider for DT. */
|
/* Register the DMAC as a DMA provider for DT. */
|
||||||
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
|
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
|
||||||
NULL);
|
NULL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto error;
|
goto err_pm_disable;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Register the DMA engine device.
|
* Register the DMA engine device.
|
||||||
@ -1979,12 +1979,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
|
|||||||
*/
|
*/
|
||||||
ret = dma_async_device_register(engine);
|
ret = dma_async_device_register(engine);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto error;
|
goto err_dma_free;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
error:
|
err_dma_free:
|
||||||
of_dma_controller_free(pdev->dev.of_node);
|
of_dma_controller_free(pdev->dev.of_node);
|
||||||
|
err_pm_disable:
|
||||||
pm_runtime_disable(&pdev->dev);
|
pm_runtime_disable(&pdev->dev);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -2013,7 +2014,7 @@ static const struct rcar_dmac_of_data rcar_dmac_data = {
|
|||||||
.chan_offset_stride = 0x80,
|
.chan_offset_stride = 0x80,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
|
static const struct rcar_dmac_of_data rcar_gen4_dmac_data = {
|
||||||
.chan_offset_base = 0x0,
|
.chan_offset_base = 0x0,
|
||||||
.chan_offset_stride = 0x1000,
|
.chan_offset_stride = 0x1000,
|
||||||
};
|
};
|
||||||
@ -2022,9 +2023,12 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
|
|||||||
{
|
{
|
||||||
.compatible = "renesas,rcar-dmac",
|
.compatible = "renesas,rcar-dmac",
|
||||||
.data = &rcar_dmac_data,
|
.data = &rcar_dmac_data,
|
||||||
|
}, {
|
||||||
|
.compatible = "renesas,rcar-gen4-dmac",
|
||||||
|
.data = &rcar_gen4_dmac_data,
|
||||||
}, {
|
}, {
|
||||||
.compatible = "renesas,dmac-r8a779a0",
|
.compatible = "renesas,dmac-r8a779a0",
|
||||||
.data = &rcar_v3u_dmac_data,
|
.data = &rcar_gen4_dmac_data,
|
||||||
},
|
},
|
||||||
{ /* Sentinel */ }
|
{ /* Sentinel */ }
|
||||||
};
|
};
|
||||||
|
@ -18,6 +18,7 @@
|
|||||||
#include <linux/of_dma.h>
|
#include <linux/of_dma.h>
|
||||||
#include <linux/of_platform.h>
|
#include <linux/of_platform.h>
|
||||||
#include <linux/platform_device.h>
|
#include <linux/platform_device.h>
|
||||||
|
#include <linux/pm_runtime.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
|
|
||||||
@ -573,7 +574,7 @@ static void rz_dmac_issue_pending(struct dma_chan *chan)
|
|||||||
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
|
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
|
||||||
{
|
{
|
||||||
u8 i;
|
u8 i;
|
||||||
const enum dma_slave_buswidth ds_lut[] = {
|
static const enum dma_slave_buswidth ds_lut[] = {
|
||||||
DMA_SLAVE_BUSWIDTH_1_BYTE,
|
DMA_SLAVE_BUSWIDTH_1_BYTE,
|
||||||
DMA_SLAVE_BUSWIDTH_2_BYTES,
|
DMA_SLAVE_BUSWIDTH_2_BYTES,
|
||||||
DMA_SLAVE_BUSWIDTH_4_BYTES,
|
DMA_SLAVE_BUSWIDTH_4_BYTES,
|
||||||
@ -872,6 +873,13 @@ static int rz_dmac_probe(struct platform_device *pdev)
|
|||||||
/* Initialize the channels. */
|
/* Initialize the channels. */
|
||||||
INIT_LIST_HEAD(&dmac->engine.channels);
|
INIT_LIST_HEAD(&dmac->engine.channels);
|
||||||
|
|
||||||
|
pm_runtime_enable(&pdev->dev);
|
||||||
|
ret = pm_runtime_resume_and_get(&pdev->dev);
|
||||||
|
if (ret < 0) {
|
||||||
|
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
|
||||||
|
goto err_pm_disable;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < dmac->n_channels; i++) {
|
for (i = 0; i < dmac->n_channels; i++) {
|
||||||
ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
|
ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
@ -925,6 +933,10 @@ static int rz_dmac_probe(struct platform_device *pdev)
|
|||||||
channel->lmdesc.base_dma);
|
channel->lmdesc.base_dma);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pm_runtime_put(&pdev->dev);
|
||||||
|
err_pm_disable:
|
||||||
|
pm_runtime_disable(&pdev->dev);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -943,6 +955,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
|
|||||||
}
|
}
|
||||||
of_dma_controller_free(pdev->dev.of_node);
|
of_dma_controller_free(pdev->dev.of_node);
|
||||||
dma_async_device_unregister(&dmac->engine);
|
dma_async_device_unregister(&dmac->engine);
|
||||||
|
pm_runtime_put(&pdev->dev);
|
||||||
|
pm_runtime_disable(&pdev->dev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -788,14 +788,6 @@ static int shdma_config(struct dma_chan *chan,
|
|||||||
if (!config)
|
if (!config)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/*
|
|
||||||
* overriding the slave_id through dma_slave_config is deprecated,
|
|
||||||
* but possibly some out-of-tree drivers still do it.
|
|
||||||
*/
|
|
||||||
if (WARN_ON_ONCE(config->slave_id &&
|
|
||||||
config->slave_id != schan->real_slave_id))
|
|
||||||
schan->real_slave_id = config->slave_id;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We could lock this, but you shouldn't be configuring the
|
* We could lock this, but you shouldn't be configuring the
|
||||||
* channel, while using it...
|
* channel, while using it...
|
||||||
@ -1044,9 +1036,7 @@ EXPORT_SYMBOL(shdma_cleanup);
|
|||||||
|
|
||||||
static int __init shdma_enter(void)
|
static int __init shdma_enter(void)
|
||||||
{
|
{
|
||||||
shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
|
shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
|
||||||
sizeof(long),
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (!shdma_slave_used)
|
if (!shdma_slave_used)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
return 0;
|
return 0;
|
||||||
@ -1055,7 +1045,7 @@ module_init(shdma_enter);
|
|||||||
|
|
||||||
static void __exit shdma_exit(void)
|
static void __exit shdma_exit(void)
|
||||||
{
|
{
|
||||||
kfree(shdma_slave_used);
|
bitmap_free(shdma_slave_used);
|
||||||
}
|
}
|
||||||
module_exit(shdma_exit);
|
module_exit(shdma_exit);
|
||||||
|
|
||||||
|
@ -795,9 +795,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
|
|||||||
return dst_datawidth;
|
return dst_datawidth;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (slave_cfg->slave_id)
|
|
||||||
schan->dev_id = slave_cfg->slave_id;
|
|
||||||
|
|
||||||
hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
|
hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -496,6 +496,7 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
|
|||||||
spin_lock_irqsave(&chan->vchan.lock, flags);
|
spin_lock_irqsave(&chan->vchan.lock, flags);
|
||||||
|
|
||||||
if (chan->desc) {
|
if (chan->desc) {
|
||||||
|
dma_cookie_complete(&chan->desc->vdesc.tx);
|
||||||
vchan_terminate_vdesc(&chan->desc->vdesc);
|
vchan_terminate_vdesc(&chan->desc->vdesc);
|
||||||
if (chan->busy)
|
if (chan->busy)
|
||||||
stm32_dma_stop(chan);
|
stm32_dma_stop(chan);
|
||||||
|
@ -10,6 +10,7 @@
|
|||||||
* Inspired by stm32-dma.c and dma-jz4780.c
|
* Inspired by stm32-dma.c and dma-jz4780.c
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#include <linux/bitfield.h>
|
||||||
#include <linux/clk.h>
|
#include <linux/clk.h>
|
||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
#include <linux/dmaengine.h>
|
#include <linux/dmaengine.h>
|
||||||
@ -32,13 +33,6 @@
|
|||||||
|
|
||||||
#include "virt-dma.h"
|
#include "virt-dma.h"
|
||||||
|
|
||||||
/* MDMA Generic getter/setter */
|
|
||||||
#define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
|
|
||||||
#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
|
|
||||||
(mask))
|
|
||||||
#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
|
|
||||||
STM32_MDMA_SHIFT(mask))
|
|
||||||
|
|
||||||
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
|
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
|
||||||
#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
|
#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
|
||||||
|
|
||||||
@ -80,8 +74,7 @@
|
|||||||
#define STM32_MDMA_CCR_HEX BIT(13)
|
#define STM32_MDMA_CCR_HEX BIT(13)
|
||||||
#define STM32_MDMA_CCR_BEX BIT(12)
|
#define STM32_MDMA_CCR_BEX BIT(12)
|
||||||
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
|
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
|
||||||
#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
|
||||||
STM32_MDMA_CCR_PL_MASK)
|
|
||||||
#define STM32_MDMA_CCR_TCIE BIT(5)
|
#define STM32_MDMA_CCR_TCIE BIT(5)
|
||||||
#define STM32_MDMA_CCR_BTIE BIT(4)
|
#define STM32_MDMA_CCR_BTIE BIT(4)
|
||||||
#define STM32_MDMA_CCR_BRTIE BIT(3)
|
#define STM32_MDMA_CCR_BRTIE BIT(3)
|
||||||
@ -99,48 +92,33 @@
|
|||||||
#define STM32_MDMA_CTCR_BWM BIT(31)
|
#define STM32_MDMA_CTCR_BWM BIT(31)
|
||||||
#define STM32_MDMA_CTCR_SWRM BIT(30)
|
#define STM32_MDMA_CTCR_SWRM BIT(30)
|
||||||
#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
|
#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
|
||||||
#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_TRGM(n) FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
|
||||||
STM32_MDMA_CTCR_TRGM_MSK)
|
#define STM32_MDMA_CTCR_TRGM_GET(n) FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
|
||||||
#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
|
|
||||||
STM32_MDMA_CTCR_TRGM_MSK)
|
|
||||||
#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
|
#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
|
||||||
#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTCR_PAM(n) FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
|
||||||
STM32_MDMA_CTCR_PAM_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_PKE BIT(25)
|
#define STM32_MDMA_CTCR_PKE BIT(25)
|
||||||
#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
|
#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
|
||||||
#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_TLEN(n) FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
|
||||||
STM32_MDMA_CTCR_TLEN_MSK)
|
#define STM32_MDMA_CTCR_TLEN_GET(n) FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
|
||||||
#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
|
|
||||||
STM32_MDMA_CTCR_TLEN_MSK)
|
|
||||||
#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
|
#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
|
||||||
#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_LEN2(n) FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
|
||||||
STM32_MDMA_CTCR_LEN2_MSK)
|
#define STM32_MDMA_CTCR_LEN2_GET(n) FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
|
||||||
#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
|
|
||||||
STM32_MDMA_CTCR_LEN2_MSK)
|
|
||||||
#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
|
#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
|
||||||
#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTCR_DBURST(n) FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
|
||||||
STM32_MDMA_CTCR_DBURST_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
|
#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
|
||||||
#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTCR_SBURST(n) FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
|
||||||
STM32_MDMA_CTCR_SBURST_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
|
#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
|
||||||
#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_DINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
|
||||||
STM32_MDMA_CTCR_DINCOS_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
|
#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
|
||||||
#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_SINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
|
||||||
STM32_MDMA_CTCR_SINCOS_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
|
#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
|
||||||
#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTCR_DSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
|
||||||
STM32_MDMA_CTCR_DSIZE_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
|
#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
|
||||||
#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTCR_SSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
|
||||||
STM32_MDMA_CTCR_SSIZE_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
|
#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
|
||||||
#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_DINC(n) FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
|
||||||
STM32_MDMA_CTCR_DINC_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
|
#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
|
||||||
#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
|
#define STM32_MDMA_CTCR_SINC(n) FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
|
||||||
STM32_MDMA_CTCR_SINC_MASK)
|
|
||||||
#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
|
#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
|
||||||
| STM32_MDMA_CTCR_DINC_MASK \
|
| STM32_MDMA_CTCR_DINC_MASK \
|
||||||
| STM32_MDMA_CTCR_SINCOS_MASK \
|
| STM32_MDMA_CTCR_SINCOS_MASK \
|
||||||
@ -151,16 +129,13 @@
|
|||||||
/* MDMA Channel x block number of data register */
|
/* MDMA Channel x block number of data register */
|
||||||
#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
|
#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
|
||||||
#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
|
#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
|
||||||
#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CBNDTR_BRC(n) FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
|
||||||
STM32_MDMA_CBNDTR_BRC_MK)
|
#define STM32_MDMA_CBNDTR_BRC_GET(n) FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
|
||||||
#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
|
|
||||||
STM32_MDMA_CBNDTR_BRC_MK)
|
|
||||||
|
|
||||||
#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
|
#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
|
||||||
#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
|
#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
|
||||||
#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
|
#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
|
||||||
#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CBNDTR_BNDT(n) FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))
|
||||||
STM32_MDMA_CBNDTR_BNDT_MASK)
|
|
||||||
|
|
||||||
/* MDMA Channel x source address register */
|
/* MDMA Channel x source address register */
|
||||||
#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
|
#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
|
||||||
@ -171,11 +146,9 @@
|
|||||||
/* MDMA Channel x block repeat address update register */
|
/* MDMA Channel x block repeat address update register */
|
||||||
#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
|
#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
|
||||||
#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
|
#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
|
||||||
#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CBRUR_DUV(n) FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
|
||||||
STM32_MDMA_CBRUR_DUV_MASK)
|
|
||||||
#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
|
#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
|
||||||
#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CBRUR_SUV(n) FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))
|
||||||
STM32_MDMA_CBRUR_SUV_MASK)
|
|
||||||
|
|
||||||
/* MDMA Channel x link address register */
|
/* MDMA Channel x link address register */
|
||||||
#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
|
#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
|
||||||
@ -185,8 +158,7 @@
|
|||||||
#define STM32_MDMA_CTBR_DBUS BIT(17)
|
#define STM32_MDMA_CTBR_DBUS BIT(17)
|
||||||
#define STM32_MDMA_CTBR_SBUS BIT(16)
|
#define STM32_MDMA_CTBR_SBUS BIT(16)
|
||||||
#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0)
|
#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0)
|
||||||
#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
|
#define STM32_MDMA_CTBR_TSEL(n) FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))
|
||||||
STM32_MDMA_CTBR_TSEL_MASK)
|
|
||||||
|
|
||||||
/* MDMA Channel x mask address register */
|
/* MDMA Channel x mask address register */
|
||||||
#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
|
#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
|
||||||
@ -1279,7 +1251,7 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
|
|||||||
u32 curr_hwdesc)
|
u32 curr_hwdesc)
|
||||||
{
|
{
|
||||||
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
|
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
|
||||||
struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
|
struct stm32_mdma_hwdesc *hwdesc;
|
||||||
u32 cbndtr, residue, modulo, burst_size;
|
u32 cbndtr, residue, modulo, burst_size;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
@ -1566,7 +1538,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
|
|||||||
if (count < 0)
|
if (count < 0)
|
||||||
count = 0;
|
count = 0;
|
||||||
|
|
||||||
dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
|
dmadev = devm_kzalloc(&pdev->dev,
|
||||||
|
struct_size(dmadev, ahb_addr_masks, count),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (!dmadev)
|
if (!dmadev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -343,12 +343,6 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
|
|||||||
}
|
}
|
||||||
|
|
||||||
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
|
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
|
||||||
if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
|
|
||||||
sconfig->device_fc) {
|
|
||||||
if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
|
|
||||||
return -EINVAL;
|
|
||||||
tdc->slave_id = sconfig->slave_id;
|
|
||||||
}
|
|
||||||
tdc->config_init = true;
|
tdc->config_init = true;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -43,10 +43,8 @@
|
|||||||
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
|
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
|
||||||
|
|
||||||
#define ADMA_CH_FIFO_CTRL 0x2c
|
#define ADMA_CH_FIFO_CTRL 0x2c
|
||||||
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
|
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
|
||||||
#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
|
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
|
||||||
#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
|
|
||||||
#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
|
|
||||||
|
|
||||||
#define ADMA_CH_LOWER_SRC_ADDR 0x34
|
#define ADMA_CH_LOWER_SRC_ADDR 0x34
|
||||||
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
|
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
|
||||||
@ -61,29 +59,26 @@
|
|||||||
|
|
||||||
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
|
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
|
||||||
|
|
||||||
#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
|
|
||||||
TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
|
|
||||||
|
|
||||||
#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
|
|
||||||
TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
|
|
||||||
|
|
||||||
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
|
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
|
||||||
|
|
||||||
struct tegra_adma;
|
struct tegra_adma;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* struct tegra_adma_chip_data - Tegra chip specific data
|
* struct tegra_adma_chip_data - Tegra chip specific data
|
||||||
|
* @adma_get_burst_config: Function callback used to set DMA burst size.
|
||||||
* @global_reg_offset: Register offset of DMA global register.
|
* @global_reg_offset: Register offset of DMA global register.
|
||||||
* @global_int_clear: Register offset of DMA global interrupt clear.
|
* @global_int_clear: Register offset of DMA global interrupt clear.
|
||||||
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
|
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
|
||||||
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
|
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
|
||||||
* @ch_base_offset: Register offset of DMA channel registers.
|
* @ch_base_offset: Register offset of DMA channel registers.
|
||||||
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
|
|
||||||
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
|
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
|
||||||
* @ch_req_mask: Mask for Tx or Rx channel select.
|
* @ch_req_mask: Mask for Tx or Rx channel select.
|
||||||
* @ch_req_max: Maximum number of Tx or Rx channels available.
|
* @ch_req_max: Maximum number of Tx or Rx channels available.
|
||||||
* @ch_reg_size: Size of DMA channel register space.
|
* @ch_reg_size: Size of DMA channel register space.
|
||||||
* @nr_channels: Number of DMA channels available.
|
* @nr_channels: Number of DMA channels available.
|
||||||
|
* @ch_fifo_size_mask: Mask for FIFO size field.
|
||||||
|
* @sreq_index_offset: Slave channel index offset.
|
||||||
|
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
|
||||||
*/
|
*/
|
||||||
struct tegra_adma_chip_data {
|
struct tegra_adma_chip_data {
|
||||||
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
|
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
|
||||||
@ -97,6 +92,8 @@ struct tegra_adma_chip_data {
|
|||||||
unsigned int ch_req_max;
|
unsigned int ch_req_max;
|
||||||
unsigned int ch_reg_size;
|
unsigned int ch_reg_size;
|
||||||
unsigned int nr_channels;
|
unsigned int nr_channels;
|
||||||
|
unsigned int ch_fifo_size_mask;
|
||||||
|
unsigned int sreq_index_offset;
|
||||||
bool has_outstanding_reqs;
|
bool has_outstanding_reqs;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -560,13 +557,14 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
|
|||||||
{
|
{
|
||||||
struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
|
struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
|
||||||
const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
|
const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
|
||||||
unsigned int burst_size, adma_dir;
|
unsigned int burst_size, adma_dir, fifo_size_shift;
|
||||||
|
|
||||||
if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
|
if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
switch (direction) {
|
switch (direction) {
|
||||||
case DMA_MEM_TO_DEV:
|
case DMA_MEM_TO_DEV:
|
||||||
|
fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT;
|
||||||
adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
|
adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
|
||||||
burst_size = tdc->sconfig.dst_maxburst;
|
burst_size = tdc->sconfig.dst_maxburst;
|
||||||
ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
|
ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
|
||||||
@ -577,6 +575,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case DMA_DEV_TO_MEM:
|
case DMA_DEV_TO_MEM:
|
||||||
|
fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT;
|
||||||
adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
|
adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
|
||||||
burst_size = tdc->sconfig.src_maxburst;
|
burst_size = tdc->sconfig.src_maxburst;
|
||||||
ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
|
ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
|
||||||
@ -598,7 +597,27 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
|
|||||||
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
|
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
|
||||||
if (cdata->has_outstanding_reqs)
|
if (cdata->has_outstanding_reqs)
|
||||||
ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
|
ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
|
||||||
ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
|
|
||||||
|
/*
|
||||||
|
* 'sreq_index' represents the current ADMAIF channel number and as per
|
||||||
|
* HW recommendation its FIFO size should match with the corresponding
|
||||||
|
* ADMA channel.
|
||||||
|
*
|
||||||
|
* ADMA FIFO size is set as per below (based on default ADMAIF channel
|
||||||
|
* FIFO sizes):
|
||||||
|
* fifo_size = 0x2 (sreq_index > sreq_index_offset)
|
||||||
|
* fifo_size = 0x3 (sreq_index <= sreq_index_offset)
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
if (tdc->sreq_index > cdata->sreq_index_offset)
|
||||||
|
ch_regs->fifo_ctrl =
|
||||||
|
ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask,
|
||||||
|
fifo_size_shift);
|
||||||
|
else
|
||||||
|
ch_regs->fifo_ctrl =
|
||||||
|
ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
|
||||||
|
fifo_size_shift);
|
||||||
|
|
||||||
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
|
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
|
||||||
|
|
||||||
return tegra_adma_request_alloc(tdc, direction);
|
return tegra_adma_request_alloc(tdc, direction);
|
||||||
@ -782,12 +801,13 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
|
|||||||
.ch_req_tx_shift = 28,
|
.ch_req_tx_shift = 28,
|
||||||
.ch_req_rx_shift = 24,
|
.ch_req_rx_shift = 24,
|
||||||
.ch_base_offset = 0,
|
.ch_base_offset = 0,
|
||||||
.has_outstanding_reqs = false,
|
|
||||||
.ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
|
|
||||||
.ch_req_mask = 0xf,
|
.ch_req_mask = 0xf,
|
||||||
.ch_req_max = 10,
|
.ch_req_max = 10,
|
||||||
.ch_reg_size = 0x80,
|
.ch_reg_size = 0x80,
|
||||||
.nr_channels = 22,
|
.nr_channels = 22,
|
||||||
|
.ch_fifo_size_mask = 0xf,
|
||||||
|
.sreq_index_offset = 2,
|
||||||
|
.has_outstanding_reqs = false,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct tegra_adma_chip_data tegra186_chip_data = {
|
static const struct tegra_adma_chip_data tegra186_chip_data = {
|
||||||
@ -797,12 +817,13 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
|
|||||||
.ch_req_tx_shift = 27,
|
.ch_req_tx_shift = 27,
|
||||||
.ch_req_rx_shift = 22,
|
.ch_req_rx_shift = 22,
|
||||||
.ch_base_offset = 0x10000,
|
.ch_base_offset = 0x10000,
|
||||||
.has_outstanding_reqs = true,
|
|
||||||
.ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
|
|
||||||
.ch_req_mask = 0x1f,
|
.ch_req_mask = 0x1f,
|
||||||
.ch_req_max = 20,
|
.ch_req_max = 20,
|
||||||
.ch_reg_size = 0x100,
|
.ch_reg_size = 0x100,
|
||||||
.nr_channels = 32,
|
.nr_channels = 32,
|
||||||
|
.ch_fifo_size_mask = 0x1f,
|
||||||
|
.sreq_index_offset = 4,
|
||||||
|
.has_outstanding_reqs = true,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct of_device_id tegra_adma_of_match[] = {
|
static const struct of_device_id tegra_adma_of_match[] = {
|
||||||
@ -940,7 +961,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
|
|||||||
for (i = 0; i < tdma->nr_channels; ++i)
|
for (i = 0; i < tdma->nr_channels; ++i)
|
||||||
irq_dispose_mapping(tdma->channels[i].irq);
|
irq_dispose_mapping(tdma->channels[i].irq);
|
||||||
|
|
||||||
pm_runtime_put_sync(&pdev->dev);
|
|
||||||
pm_runtime_disable(&pdev->dev);
|
pm_runtime_disable(&pdev->dev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -8,5 +8,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
|
|||||||
k3-psil-am654.o \
|
k3-psil-am654.o \
|
||||||
k3-psil-j721e.o \
|
k3-psil-j721e.o \
|
||||||
k3-psil-j7200.o \
|
k3-psil-j7200.o \
|
||||||
k3-psil-am64.o
|
k3-psil-am64.o \
|
||||||
|
k3-psil-j721s2.o
|
||||||
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
|
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
|
||||||
|
@ -1681,8 +1681,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
|
|||||||
|
|
||||||
dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
|
dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
|
||||||
emr = val;
|
emr = val;
|
||||||
for (i = find_next_bit(&emr, 32, 0); i < 32;
|
for_each_set_bit(i, &emr, 32) {
|
||||||
i = find_next_bit(&emr, 32, i + 1)) {
|
|
||||||
int k = (j << 5) + i;
|
int k = (j << 5) + i;
|
||||||
|
|
||||||
/* Clear the corresponding EMR bits */
|
/* Clear the corresponding EMR bits */
|
||||||
|
@ -41,5 +41,6 @@ extern struct psil_ep_map am654_ep_map;
|
|||||||
extern struct psil_ep_map j721e_ep_map;
|
extern struct psil_ep_map j721e_ep_map;
|
||||||
extern struct psil_ep_map j7200_ep_map;
|
extern struct psil_ep_map j7200_ep_map;
|
||||||
extern struct psil_ep_map am64_ep_map;
|
extern struct psil_ep_map am64_ep_map;
|
||||||
|
extern struct psil_ep_map j721s2_ep_map;
|
||||||
|
|
||||||
#endif /* K3_PSIL_PRIV_H_ */
|
#endif /* K3_PSIL_PRIV_H_ */
|
||||||
|
@ -21,6 +21,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
|
|||||||
{ .family = "J721E", .data = &j721e_ep_map },
|
{ .family = "J721E", .data = &j721e_ep_map },
|
||||||
{ .family = "J7200", .data = &j7200_ep_map },
|
{ .family = "J7200", .data = &j7200_ep_map },
|
||||||
{ .family = "AM64X", .data = &am64_ep_map },
|
{ .family = "AM64X", .data = &am64_ep_map },
|
||||||
|
{ .family = "J721S2", .data = &j721s2_ep_map },
|
||||||
{ /* sentinel */ }
|
{ /* sentinel */ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -168,8 +168,7 @@ int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
|
|||||||
{
|
{
|
||||||
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
|
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
|
||||||
|
|
||||||
return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
|
return msi_get_virq(ud->dev, udma_tflow_id + oes->pktdma_tchan_flow);
|
||||||
oes->pktdma_tchan_flow);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
|
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
|
||||||
|
|
||||||
@ -177,7 +176,6 @@ int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
|
|||||||
{
|
{
|
||||||
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
|
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
|
||||||
|
|
||||||
return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
|
return msi_get_virq(ud->dev, udma_rflow_id + oes->pktdma_rchan_flow);
|
||||||
oes->pktdma_rchan_flow);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
|
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
|
||||||
|
@ -2313,8 +2313,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
|
|
||||||
/* Event from UDMA (TR events) only needed for slave TR mode channels */
|
/* Event from UDMA (TR events) only needed for slave TR mode channels */
|
||||||
if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
|
if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
|
||||||
uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
|
uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
|
||||||
irq_udma_idx);
|
|
||||||
if (uc->irq_num_udma <= 0) {
|
if (uc->irq_num_udma <= 0) {
|
||||||
dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
|
dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
|
||||||
irq_udma_idx);
|
irq_udma_idx);
|
||||||
@ -2486,7 +2485,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
uc->psil_paired = true;
|
uc->psil_paired = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
|
uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
|
||||||
if (uc->irq_num_ring <= 0) {
|
if (uc->irq_num_ring <= 0) {
|
||||||
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
|
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
|
||||||
irq_ring_idx);
|
irq_ring_idx);
|
||||||
@ -2503,8 +2502,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
|
|
||||||
/* Event from BCDMA (TR events) only needed for slave channels */
|
/* Event from BCDMA (TR events) only needed for slave channels */
|
||||||
if (is_slave_direction(uc->config.dir)) {
|
if (is_slave_direction(uc->config.dir)) {
|
||||||
uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
|
uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
|
||||||
irq_udma_idx);
|
|
||||||
if (uc->irq_num_udma <= 0) {
|
if (uc->irq_num_udma <= 0) {
|
||||||
dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
|
dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
|
||||||
irq_udma_idx);
|
irq_udma_idx);
|
||||||
@ -2672,7 +2670,7 @@ static int pktdma_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
|
|
||||||
uc->psil_paired = true;
|
uc->psil_paired = true;
|
||||||
|
|
||||||
uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
|
uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
|
||||||
if (uc->irq_num_ring <= 0) {
|
if (uc->irq_num_ring <= 0) {
|
||||||
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
|
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
|
||||||
irq_ring_idx);
|
irq_ring_idx);
|
||||||
@ -4376,6 +4374,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
|
|||||||
{ .family = "J721E", .data = &j721e_soc_data },
|
{ .family = "J721E", .data = &j721e_soc_data },
|
||||||
{ .family = "J7200", .data = &j7200_soc_data },
|
{ .family = "J7200", .data = &j7200_soc_data },
|
||||||
{ .family = "AM64X", .data = &am64_soc_data },
|
{ .family = "AM64X", .data = &am64_soc_data },
|
||||||
|
{ .family = "J721S2", .data = &j721e_soc_data},
|
||||||
{ /* sentinel */ }
|
{ /* sentinel */ }
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -4534,45 +4533,60 @@ static int udma_setup_resources(struct udma_dev *ud)
|
|||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
|
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
|
||||||
|
irq_res.sets = 1;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
|
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->tchan_map,
|
udma_mark_resource_ranges(ud, ud->tchan_map,
|
||||||
&rm_res->desc[i], "tchan");
|
&rm_res->desc[i], "tchan");
|
||||||
|
irq_res.sets = rm_res->sets;
|
||||||
}
|
}
|
||||||
irq_res.sets = rm_res->sets;
|
|
||||||
|
|
||||||
/* rchan and matching default flow ranges */
|
/* rchan and matching default flow ranges */
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
|
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
|
||||||
|
irq_res.sets++;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
|
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->rchan_map,
|
udma_mark_resource_ranges(ud, ud->rchan_map,
|
||||||
&rm_res->desc[i], "rchan");
|
&rm_res->desc[i], "rchan");
|
||||||
|
irq_res.sets += rm_res->sets;
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_res.sets += rm_res->sets;
|
|
||||||
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
||||||
|
if (!irq_res.desc)
|
||||||
|
return -ENOMEM;
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
||||||
for (i = 0; i < rm_res->sets; i++) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[i].start;
|
irq_res.desc[0].start = 0;
|
||||||
irq_res.desc[i].num = rm_res->desc[i].num;
|
irq_res.desc[0].num = ud->tchan_cnt;
|
||||||
irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
|
i = 1;
|
||||||
irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
|
} else {
|
||||||
|
for (i = 0; i < rm_res->sets; i++) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[i].start;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[i].num;
|
||||||
|
irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
|
||||||
|
irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
||||||
for (j = 0; j < rm_res->sets; j++, i++) {
|
if (IS_ERR(rm_res)) {
|
||||||
if (rm_res->desc[j].num) {
|
irq_res.desc[i].start = 0;
|
||||||
irq_res.desc[i].start = rm_res->desc[j].start +
|
irq_res.desc[i].num = ud->rchan_cnt;
|
||||||
ud->soc_data->oes.udma_rchan;
|
} else {
|
||||||
irq_res.desc[i].num = rm_res->desc[j].num;
|
for (j = 0; j < rm_res->sets; j++, i++) {
|
||||||
}
|
if (rm_res->desc[j].num) {
|
||||||
if (rm_res->desc[j].num_sec) {
|
irq_res.desc[i].start = rm_res->desc[j].start +
|
||||||
irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
|
ud->soc_data->oes.udma_rchan;
|
||||||
ud->soc_data->oes.udma_rchan;
|
irq_res.desc[i].num = rm_res->desc[j].num;
|
||||||
irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
|
}
|
||||||
|
if (rm_res->desc[j].num_sec) {
|
||||||
|
irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
|
||||||
|
ud->soc_data->oes.udma_rchan;
|
||||||
|
irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
|
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
|
||||||
@ -4690,14 +4704,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
|
|||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
|
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
|
||||||
|
irq_res.sets++;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
|
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->bchan_map,
|
udma_mark_resource_ranges(ud, ud->bchan_map,
|
||||||
&rm_res->desc[i],
|
&rm_res->desc[i],
|
||||||
"bchan");
|
"bchan");
|
||||||
|
irq_res.sets += rm_res->sets;
|
||||||
}
|
}
|
||||||
irq_res.sets += rm_res->sets;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* tchan ranges */
|
/* tchan ranges */
|
||||||
@ -4705,14 +4720,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
|
|||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
|
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
|
||||||
|
irq_res.sets += 2;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
|
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->tchan_map,
|
udma_mark_resource_ranges(ud, ud->tchan_map,
|
||||||
&rm_res->desc[i],
|
&rm_res->desc[i],
|
||||||
"tchan");
|
"tchan");
|
||||||
|
irq_res.sets += rm_res->sets * 2;
|
||||||
}
|
}
|
||||||
irq_res.sets += rm_res->sets * 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* rchan ranges */
|
/* rchan ranges */
|
||||||
@ -4720,47 +4736,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
|
|||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
|
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
|
||||||
|
irq_res.sets += 2;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
|
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->rchan_map,
|
udma_mark_resource_ranges(ud, ud->rchan_map,
|
||||||
&rm_res->desc[i],
|
&rm_res->desc[i],
|
||||||
"rchan");
|
"rchan");
|
||||||
|
irq_res.sets += rm_res->sets * 2;
|
||||||
}
|
}
|
||||||
irq_res.sets += rm_res->sets * 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
||||||
|
if (!irq_res.desc)
|
||||||
|
return -ENOMEM;
|
||||||
if (ud->bchan_cnt) {
|
if (ud->bchan_cnt) {
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
|
||||||
for (i = 0; i < rm_res->sets; i++) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[i].start +
|
irq_res.desc[0].start = oes->bcdma_bchan_ring;
|
||||||
oes->bcdma_bchan_ring;
|
irq_res.desc[0].num = ud->bchan_cnt;
|
||||||
irq_res.desc[i].num = rm_res->desc[i].num;
|
i = 1;
|
||||||
|
} else {
|
||||||
|
for (i = 0; i < rm_res->sets; i++) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[i].start +
|
||||||
|
oes->bcdma_bchan_ring;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[i].num;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (ud->tchan_cnt) {
|
if (ud->tchan_cnt) {
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
|
||||||
for (j = 0; j < rm_res->sets; j++, i += 2) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[j].start +
|
irq_res.desc[i].start = oes->bcdma_tchan_data;
|
||||||
oes->bcdma_tchan_data;
|
irq_res.desc[i].num = ud->tchan_cnt;
|
||||||
irq_res.desc[i].num = rm_res->desc[j].num;
|
irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
|
||||||
|
irq_res.desc[i + 1].num = ud->tchan_cnt;
|
||||||
|
i += 2;
|
||||||
|
} else {
|
||||||
|
for (j = 0; j < rm_res->sets; j++, i += 2) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[j].start +
|
||||||
|
oes->bcdma_tchan_data;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[j].num;
|
||||||
|
|
||||||
irq_res.desc[i + 1].start = rm_res->desc[j].start +
|
irq_res.desc[i + 1].start = rm_res->desc[j].start +
|
||||||
oes->bcdma_tchan_ring;
|
oes->bcdma_tchan_ring;
|
||||||
irq_res.desc[i + 1].num = rm_res->desc[j].num;
|
irq_res.desc[i + 1].num = rm_res->desc[j].num;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (ud->rchan_cnt) {
|
if (ud->rchan_cnt) {
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
|
||||||
for (j = 0; j < rm_res->sets; j++, i += 2) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[j].start +
|
irq_res.desc[i].start = oes->bcdma_rchan_data;
|
||||||
oes->bcdma_rchan_data;
|
irq_res.desc[i].num = ud->rchan_cnt;
|
||||||
irq_res.desc[i].num = rm_res->desc[j].num;
|
irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
|
||||||
|
irq_res.desc[i + 1].num = ud->rchan_cnt;
|
||||||
|
i += 2;
|
||||||
|
} else {
|
||||||
|
for (j = 0; j < rm_res->sets; j++, i += 2) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[j].start +
|
||||||
|
oes->bcdma_rchan_data;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[j].num;
|
||||||
|
|
||||||
irq_res.desc[i + 1].start = rm_res->desc[j].start +
|
irq_res.desc[i + 1].start = rm_res->desc[j].start +
|
||||||
oes->bcdma_rchan_ring;
|
oes->bcdma_rchan_ring;
|
||||||
irq_res.desc[i + 1].num = rm_res->desc[j].num;
|
irq_res.desc[i + 1].num = rm_res->desc[j].num;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4858,39 +4899,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
|
|||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
/* all rflows are assigned exclusively to Linux */
|
/* all rflows are assigned exclusively to Linux */
|
||||||
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
|
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
|
||||||
|
irq_res.sets = 1;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
|
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->rflow_in_use,
|
udma_mark_resource_ranges(ud, ud->rflow_in_use,
|
||||||
&rm_res->desc[i], "rflow");
|
&rm_res->desc[i], "rflow");
|
||||||
|
irq_res.sets = rm_res->sets;
|
||||||
}
|
}
|
||||||
irq_res.sets = rm_res->sets;
|
|
||||||
|
|
||||||
/* tflow ranges */
|
/* tflow ranges */
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
|
||||||
if (IS_ERR(rm_res)) {
|
if (IS_ERR(rm_res)) {
|
||||||
/* all tflows are assigned exclusively to Linux */
|
/* all tflows are assigned exclusively to Linux */
|
||||||
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
|
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
|
||||||
|
irq_res.sets++;
|
||||||
} else {
|
} else {
|
||||||
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
|
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
|
||||||
for (i = 0; i < rm_res->sets; i++)
|
for (i = 0; i < rm_res->sets; i++)
|
||||||
udma_mark_resource_ranges(ud, ud->tflow_map,
|
udma_mark_resource_ranges(ud, ud->tflow_map,
|
||||||
&rm_res->desc[i], "tflow");
|
&rm_res->desc[i], "tflow");
|
||||||
|
irq_res.sets += rm_res->sets;
|
||||||
}
|
}
|
||||||
irq_res.sets += rm_res->sets;
|
|
||||||
|
|
||||||
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
|
||||||
|
if (!irq_res.desc)
|
||||||
|
return -ENOMEM;
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
|
||||||
for (i = 0; i < rm_res->sets; i++) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[i].start +
|
irq_res.desc[0].start = oes->pktdma_tchan_flow;
|
||||||
oes->pktdma_tchan_flow;
|
irq_res.desc[0].num = ud->tflow_cnt;
|
||||||
irq_res.desc[i].num = rm_res->desc[i].num;
|
i = 1;
|
||||||
|
} else {
|
||||||
|
for (i = 0; i < rm_res->sets; i++) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[i].start +
|
||||||
|
oes->pktdma_tchan_flow;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[i].num;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
|
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
|
||||||
for (j = 0; j < rm_res->sets; j++, i++) {
|
if (IS_ERR(rm_res)) {
|
||||||
irq_res.desc[i].start = rm_res->desc[j].start +
|
irq_res.desc[i].start = oes->pktdma_rchan_flow;
|
||||||
oes->pktdma_rchan_flow;
|
irq_res.desc[i].num = ud->rflow_cnt;
|
||||||
irq_res.desc[i].num = rm_res->desc[j].num;
|
} else {
|
||||||
|
for (j = 0; j < rm_res->sets; j++, i++) {
|
||||||
|
irq_res.desc[i].start = rm_res->desc[j].start +
|
||||||
|
oes->pktdma_rchan_flow;
|
||||||
|
irq_res.desc[i].num = rm_res->desc[j].num;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
|
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
|
||||||
kfree(irq_res.desc);
|
kfree(irq_res.desc);
|
||||||
@@ -5279,9 +5335,9 @@ static int udma_probe(struct platform_device *pdev)
 	if (IS_ERR(ud->ringacc))
 		return PTR_ERR(ud->ringacc);
 
-	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
 					    DOMAIN_BUS_TI_SCI_INTA_MSI);
-	if (!dev->msi_domain) {
+	if (!dev->msi.domain) {
 		dev_err(dev, "Failed to get MSI domain\n");
 		return -EPROBE_DEFER;
 	}
@@ -792,7 +792,7 @@ static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
 }
 
 /**
- * xilinx_dma_tx_descriptor - Allocate transaction descriptor
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
  * @chan: Driver specific DMA channel
  *
 * Return: The allocated descriptor on success and NULL on failure.
@@ -998,14 +998,12 @@ static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
 					  struct xilinx_dma_tx_descriptor *desc,
 					  unsigned long *flags)
 {
-	dma_async_tx_callback callback;
-	void *callback_param;
+	struct dmaengine_desc_callback cb;
 
-	callback = desc->async_tx.callback;
-	callback_param = desc->async_tx.callback_param;
-	if (callback) {
+	dmaengine_desc_get_callback(&desc->async_tx, &cb);
+	if (dmaengine_desc_callback_valid(&cb)) {
 		spin_unlock_irqrestore(&chan->lock, *flags);
-		callback(callback_param);
+		dmaengine_desc_callback_invoke(&cb, NULL);
 		spin_lock_irqsave(&chan->lock, *flags);
 	}
 }
@@ -2129,6 +2127,126 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	return NULL;
 }
 
+/**
+ * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
+			struct dma_chan *dchan, struct scatterlist *dst_sg,
+			unsigned int dst_sg_len, struct scatterlist *src_sg,
+			unsigned int src_sg_len, unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+	struct xilinx_cdma_desc_hw *hw;
+	size_t len, dst_avail, src_avail;
+	dma_addr_t dma_dst, dma_src;
+
+	if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
+		return NULL;
+
+	if (unlikely(!dst_sg || !src_sg))
+		return NULL;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	dst_avail = sg_dma_len(dst_sg);
+	src_avail = sg_dma_len(src_sg);
+	/*
+	 * loop until there is either no more source or no more destination
+	 * scatterlist entry
+	 */
+	while (true) {
+		len = min_t(size_t, src_avail, dst_avail);
+		len = min_t(size_t, len, chan->xdev->max_buffer_len);
+		if (len == 0)
+			goto fetch;
+
+		/* Allocate the link descriptor from DMA pool */
+		segment = xilinx_cdma_alloc_tx_segment(chan);
+		if (!segment)
+			goto error;
+
+		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+			dst_avail;
+		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+			src_avail;
+		hw = &segment->hw;
+		hw->control = len;
+		hw->src_addr = dma_src;
+		hw->dest_addr = dma_dst;
+		if (chan->ext_addr) {
+			hw->src_addr_msb = upper_32_bits(dma_src);
+			hw->dest_addr_msb = upper_32_bits(dma_dst);
+		}
+
+		if (prev) {
+			prev->hw.next_desc = segment->phys;
+			if (chan->ext_addr)
+				prev->hw.next_desc_msb =
+					upper_32_bits(segment->phys);
+		}
+
+		prev = segment;
+		dst_avail -= len;
+		src_avail -= len;
+		list_add_tail(&segment->node, &desc->segments);
+
+fetch:
+		/* Fetch the next dst scatterlist entry */
+		if (dst_avail == 0) {
+			if (dst_sg_len == 0)
+				break;
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				break;
+			dst_sg_len--;
+			dst_avail = sg_dma_len(dst_sg);
+		}
+		/* Fetch the next src scatterlist entry */
+		if (src_avail == 0) {
+			if (src_sg_len == 0)
+				break;
+			src_sg = sg_next(src_sg);
+			if (src_sg == NULL)
+				break;
+			src_sg_len--;
+			src_avail = sg_dma_len(src_sg);
+		}
+	}
+
+	if (list_empty(&desc->segments)) {
+		dev_err(chan->xdev->dev,
+			"%s: Zero-size SG transfer requested\n", __func__);
+		goto error;
+	}
+
+	/* Link the last hardware descriptor with the first. */
+	segment = list_first_entry(&desc->segments,
+				struct xilinx_cdma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
 /**
  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  * @dchan: DMA channel
@@ -2483,7 +2601,7 @@ static void xilinx_dma_synchronize(struct dma_chan *dchan)
 }
 
 /**
- * xilinx_dma_channel_set_config - Configure VDMA channel
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
  * Run-time configuration for Axi VDMA, supports:
  * . halt the channel
  * . configure interrupt coalescing and inter-packet delay threshold
@@ -2862,7 +2980,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	}
 
 	/* Request the interrupt */
-	chan->irq = irq_of_parse_and_map(node, chan->tdest);
+	chan->irq = of_irq_get(node, chan->tdest);
+	if (chan->irq < 0)
+		return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
 	err = request_irq(chan->irq, xdev->dma_config->irq_handler,
 			  IRQF_SHARED, "xilinx-dma-controller", chan);
 	if (err) {
@@ -2936,8 +3056,11 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
 		dev_warn(xdev->dev, "missing dma-channels property\n");
 
-	for (i = 0; i < nr_channels; i++)
-		xilinx_dma_chan_probe(xdev, node);
+	for (i = 0; i < nr_channels; i++) {
+		ret = xilinx_dma_chan_probe(xdev, node);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -3117,7 +3240,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+		dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+		xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
 		/* Residue calculation is supported by only AXI DMA and CDMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -12,6 +12,7 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
@@ -1273,6 +1274,7 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
 			       struct dma_slave_config *config)
 {
 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dpdma_peripheral_config *pconfig;
 	unsigned long flags;
 
 	/*
@@ -1282,15 +1284,18 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
 	 * fixed both on the DPDMA side and on the DP controller side.
 	 */
 
-	spin_lock_irqsave(&chan->lock, flags);
-
 	/*
-	 * Abuse the slave_id to indicate that the channel is part of a video
-	 * group.
+	 * Use the peripheral_config to indicate that the channel is part
+	 * of a video group. This requires matching use of the custom
+	 * structure in each driver.
 	 */
-	if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
-		chan->video_group = config->slave_id != 0;
+	pconfig = config->peripheral_config;
+	if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
+		return -EINVAL;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
+		chan->video_group = pconfig->video_group;
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
@@ -6,15 +6,12 @@
  */
 
 #include <linux/bitops.h>
-#include <linux/dmapool.h>
-#include <linux/dma/xilinx_dma.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
 #include <linux/of_dma.h>
-#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
@@ -603,22 +600,25 @@ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
 {
 	struct zynqmp_dma_desc_sw *desc, *next;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 
 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+		struct dmaengine_desc_callback cb;
 
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock(&chan->lock);
-			callback(callback_param);
-			spin_lock(&chan->lock);
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
+			spin_unlock_irqrestore(&chan->lock, irqflags);
+			dmaengine_desc_callback_invoke(&cb, NULL);
+			spin_lock_irqsave(&chan->lock, irqflags);
 		}
 
 		/* Run any dependencies, then free the descriptor */
 		zynqmp_dma_free_descriptor(chan, desc);
 	}
+
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
@@ -658,9 +658,13 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
  */
 static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_desc_list(chan, &chan->active_list);
 	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
 	zynqmp_dma_free_desc_list(chan, &chan->done_list);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -670,11 +674,8 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 	dma_free_coherent(chan->dev,
 		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 		chan->desc_pool_v, chan->desc_pool_p);
@@ -689,11 +690,16 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
  */
 static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_complete_descriptor(chan);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 	zynqmp_dma_chan_desc_cleanup(chan);
 	zynqmp_dma_free_descriptors(chan);
 
 	zynqmp_dma_init(chan);
 }
@@ -749,27 +755,27 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 	u32 count;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
-
 	if (chan->err) {
 		zynqmp_dma_reset(chan);
 		chan->err = false;
-		goto unlock;
+		return;
 	}
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
 
 	while (count) {
 		zynqmp_dma_complete_descriptor(chan);
-		zynqmp_dma_chan_desc_cleanup(chan);
 		count--;
 	}
-
-	if (chan->idle)
-		zynqmp_dma_start_transfer(chan);
-
-unlock:
 	spin_unlock_irqrestore(&chan->lock, irqflags);
+
+	zynqmp_dma_chan_desc_cleanup(chan);
+
+	if (chan->idle) {
+		spin_lock_irqsave(&chan->lock, irqflags);
+		zynqmp_dma_start_transfer(chan);
+		spin_unlock_irqrestore(&chan->lock, irqflags);
+	}
 }
@@ -781,12 +787,9 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return 0;
 }
@@ -1061,16 +1064,14 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	p->dev = &pdev->dev;
 
 	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
-	if (IS_ERR(zdev->clk_main)) {
-		dev_err(&pdev->dev, "main clock not found.\n");
-		return PTR_ERR(zdev->clk_main);
-	}
+	if (IS_ERR(zdev->clk_main))
+		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
+				     "main clock not found.\n");
 
 	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
-	if (IS_ERR(zdev->clk_apb)) {
-		dev_err(&pdev->dev, "apb clock not found.\n");
-		return PTR_ERR(zdev->clk_apb);
-	}
+	if (IS_ERR(zdev->clk_apb))
+		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
+				     "apb clock not found.\n");
 
 	platform_set_drvdata(pdev, zdev);
 	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
@@ -1085,7 +1086,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 
 	ret = zynqmp_dma_chan_probe(zdev, pdev);
 	if (ret) {
-		dev_err(&pdev->dev, "Probing channel failed\n");
+		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
 		goto err_disable_pm;
 	}
@@ -1097,7 +1098,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	ret = of_dma_controller_register(pdev->dev.of_node,
 					 of_zynqmp_dma_xlate, zdev);
 	if (ret) {
-		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
 		dma_async_device_unregister(&zdev->common);
 		goto free_chan_resources;
 	}
@@ -1105,8 +1106,6 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	pm_runtime_mark_last_busy(zdev->dev);
 	pm_runtime_put_sync_autosuspend(zdev->dev);
 
-	dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
-
 	return 0;
 
 free_chan_resources:
@@ -484,7 +484,7 @@ config EDAC_ARMADA_XP
 
 config EDAC_SYNOPSYS
 	tristate "Synopsys DDR Memory Controller"
-	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA
 	help
 	  Support for error detection and correction on the Synopsys DDR
 	  memory controller.
@@ -238,11 +238,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
 	if (!mci)
 		return -ENOMEM;
 
-	ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
-	if (ret) {
-		edac_mc_free(mci);
+	ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_free, mci);
+	if (ret)
 		return ret;
-	}
 
 	platform_set_drvdata(pdev, mci);
 	al_mc = mci->pvt_info;
@@ -293,11 +291,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
-	if (ret) {
-		edac_mc_del_mc(&pdev->dev);
+	ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
+	if (ret)
 		return ret;
-	}
 
 	if (al_mc->irq_ue > 0) {
 		ret = devm_request_irq(&pdev->dev,
@@ -988,6 +988,281 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 	return csrow;
 }
 
+/* Protect the PCI config register pairs used for DF indirect access. */
+static DEFINE_MUTEX(df_indirect_mutex);
+
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): Constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
+ * and FICAD HI registers but so far we only need the LO register.
+ *
+ * Use Instance Id 0xFF to indicate a broadcast read.
+ */
+#define DF_BROADCAST	0xFF
+static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+	struct pci_dev *F4;
+	u32 ficaa;
+	int err = -ENODEV;
+
+	if (node >= amd_nb_num())
+		goto out;
+
+	F4 = node_to_amd_nb(node)->link;
+	if (!F4)
+		goto out;
+
+	ficaa  = (instance_id == DF_BROADCAST) ? 0 : 1;
+	ficaa |= reg & 0x3FC;
+	ficaa |= (func & 0x7) << 11;
+	ficaa |= instance_id << 16;
+
+	mutex_lock(&df_indirect_mutex);
+
+	err = pci_write_config_dword(F4, 0x5C, ficaa);
+	if (err) {
+		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+		goto out_unlock;
+	}
+
+	err = pci_read_config_dword(F4, 0x98, lo);
+	if (err)
+		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+out_unlock:
+	mutex_unlock(&df_indirect_mutex);
+
+out:
+	return err;
+}
+
+static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+	return __df_indirect_read(node, func, reg, instance_id, lo);
+}
+
+static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
+{
+	return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
+}
+
+struct addr_ctx {
+	u64 ret_addr;
+	u32 tmp;
+	u16 nid;
+	u8 inst_id;
+};
+
+static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
+{
+	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
+
+	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
+	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
+	u8 intlv_addr_sel, intlv_addr_bit;
+	u8 num_intlv_bits, hashed_bit;
+	u8 lgcy_mmio_hole_en, base = 0;
+	u8 cs_mask, cs_id = 0;
+	bool hash_enabled = false;
+
+	struct addr_ctx ctx;
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	/* Start from the normalized address */
+	ctx.ret_addr = norm_addr;
+
+	ctx.nid = nid;
+	ctx.inst_id = umc;
+
+	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
+	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
+		goto out_err;
+
+	/* Remove HiAddrOffset from normalized address, if enabled: */
+	if (ctx.tmp & BIT(0)) {
+		u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
+
+		if (norm_addr >= hi_addr_offset) {
+			ctx.ret_addr -= hi_addr_offset;
+			base = 1;
+		}
+	}
+
+	/* Read D18F0x110 (DramBaseAddress). */
+	if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
+		goto out_err;
+
+	/* Check if address range is valid. */
+	if (!(ctx.tmp & BIT(0))) {
+		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
+			__func__, ctx.tmp);
+		goto out_err;
+	}
+
+	lgcy_mmio_hole_en = ctx.tmp & BIT(1);
+	intlv_num_chan	  = (ctx.tmp >> 4) & 0xF;
+	intlv_addr_sel	  = (ctx.tmp >> 8) & 0x7;
+	dram_base_addr	  = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
+
+	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
+	if (intlv_addr_sel > 3) {
+		pr_err("%s: Invalid interleave address select %d.\n",
+			__func__, intlv_addr_sel);
+		goto out_err;
+	}
+
+	/* Read D18F0x114 (DramLimitAddress). */
+	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
+		goto out_err;
+
+	intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
+	intlv_num_dies	  = (ctx.tmp >> 10) & 0x3;
+	dram_limit_addr	  = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
+
+	intlv_addr_bit = intlv_addr_sel + 8;
+
+	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
+	switch (intlv_num_chan) {
+	case 0:	intlv_num_chan = 0; break;
+	case 1: intlv_num_chan = 1; break;
+	case 3: intlv_num_chan = 2; break;
+	case 5:	intlv_num_chan = 3; break;
+	case 7:	intlv_num_chan = 4; break;
+
+	case 8: intlv_num_chan = 1;
+		hash_enabled = true;
+		break;
+	default:
+		pr_err("%s: Invalid number of interleaved channels %d.\n",
+			__func__, intlv_num_chan);
+		goto out_err;
+	}
+
+	num_intlv_bits = intlv_num_chan;
+
+	if (intlv_num_dies > 2) {
+		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
+			__func__, intlv_num_dies);
+		goto out_err;
+	}
+
+	num_intlv_bits += intlv_num_dies;
+
+	/* Add a bit if sockets are interleaved. */
+	num_intlv_bits += intlv_num_sockets;
+
+	/* Assert num_intlv_bits <= 4 */
+	if (num_intlv_bits > 4) {
+		pr_err("%s: Invalid interleave bits %d.\n",
+			__func__, num_intlv_bits);
+		goto out_err;
+	}
+
+	if (num_intlv_bits > 0) {
+		u64 temp_addr_x, temp_addr_i, temp_addr_y;
+		u8 die_id_bit, sock_id_bit, cs_fabric_id;
+
+		/*
+		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
+		 * This is the fabric id for this coherent slave. Use
+		 * umc/channel# as instance id of the coherent slave
+		 * for FICAA.
+		 */
+		if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
+			goto out_err;
+
+		cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
+		die_id_bit   = 0;
+
+		/* If interleaved over more than 1 channel: */
+		if (intlv_num_chan) {
+			die_id_bit = intlv_num_chan;
+			cs_mask	   = (1 << die_id_bit) - 1;
+			cs_id	   = cs_fabric_id & cs_mask;
+		}
+
+		sock_id_bit = die_id_bit;
+
+		/* Read D18F1x208 (SystemFabricIdMask). */
+		if (intlv_num_dies || intlv_num_sockets)
+			if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
+				goto out_err;
+
+		/* If interleaved over more than 1 die. */
+		if (intlv_num_dies) {
+			sock_id_bit  = die_id_bit + intlv_num_dies;
+			die_id_shift = (ctx.tmp >> 24) & 0xF;
+			die_id_mask  = (ctx.tmp >> 8) & 0xFF;
+
+			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
+		}
+
+		/* If interleaved over more than 1 socket. */
+		if (intlv_num_sockets) {
+			socket_id_shift	= (ctx.tmp >> 28) & 0xF;
+			socket_id_mask	= (ctx.tmp >> 16) & 0xFF;
+
+			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
+		}
+
+		/*
+		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
+		 * where III is the ID for this CS, and XXXXXXYYYYY are the
+		 * address bits from the post-interleaved address.
+		 * "num_intlv_bits" has been calculated to tell us how many "I"
+		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
+		 * there are (where "I" starts).
+		 */
+		temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
+		temp_addr_i = (cs_id << intlv_addr_bit);
+		temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
+		ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
+	}
+
+	/* Add dram base address */
+	ctx.ret_addr += dram_base_addr;
+
+	/* If legacy MMIO hole enabled */
+	if (lgcy_mmio_hole_en) {
+		if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
+			goto out_err;
+
+		dram_hole_base = ctx.tmp & GENMASK(31, 24);
+		if (ctx.ret_addr >= dram_hole_base)
+			ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
+	}
+
+	if (hash_enabled) {
+		/* Save some parentheses and grab ls-bit at the end. */
+		hashed_bit =	(ctx.ret_addr >> 12) ^
+				(ctx.ret_addr >> 18) ^
+				(ctx.ret_addr >> 21) ^
+				(ctx.ret_addr >> 30) ^
+				cs_id;
+
+		hashed_bit &= BIT(0);
+
+		if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
+			ctx.ret_addr ^= BIT(intlv_addr_bit);
+	}
+
+	/* Is calculated system address is above DRAM limit address? */
+	if (ctx.ret_addr > dram_limit_addr)
+		goto out_err;
+
+	*sys_addr = ctx.ret_addr;
+	return 0;
+
+out_err:
+	return -EINVAL;
+}
+
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
 /*
@@ -2650,6 +2925,26 @@ static struct amd64_family_type family_types[] = {
 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
 		}
 	},
+	[F19_M10H_CPUS] = {
+		.ctl_name = "F19h_M10h",
+		.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
+		.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
+		.max_mcs = 12,
+		.ops = {
+			.early_channel_count	= f17_early_channel_count,
+			.dbam_to_cs		= f17_addr_mask_to_cs_size,
+		}
+	},
+	[F19_M50H_CPUS] = {
+		.ctl_name = "F19h_M50h",
+		.f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
+		.f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
+		.max_mcs = 2,
+		.ops = {
+			.early_channel_count	= f17_early_channel_count,
+			.dbam_to_cs		= f17_addr_mask_to_cs_size,
+		}
+	},
 };
 
 /*
@@ -3687,11 +3982,25 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
 		break;
 
 	case 0x19:
-		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
+		if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
+			fam_type = &family_types[F19_M10H_CPUS];
+			pvt->ops = &family_types[F19_M10H_CPUS].ops;
+			break;
+		} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
 			fam_type = &family_types[F17_M70H_CPUS];
 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
 			fam_type->ctl_name = "F19h_M20h";
 			break;
+		} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
+			fam_type = &family_types[F19_M50H_CPUS];
+			pvt->ops = &family_types[F19_M50H_CPUS].ops;
+			fam_type->ctl_name = "F19h_M50h";
+			break;
+		} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
+			fam_type = &family_types[F19_M10H_CPUS];
+			pvt->ops = &family_types[F19_M10H_CPUS].ops;
+			fam_type->ctl_name = "F19h_MA0h";
+			break;
 		}
 		fam_type	= &family_types[F19_CPUS];
 		pvt->ops	= &family_types[F19_CPUS].ops;
@@ -96,7 +96,7 @@
 /* Hardware limit on ChipSelect rows per MC and processors per system */
 #define NUM_CHIPSELECTS			8
 #define DRAM_RANGES			8
-#define NUM_CONTROLLERS			8
+#define NUM_CONTROLLERS			12
 
 #define ON true
 #define OFF false
@@ -126,6 +126,10 @@
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
 #define PCI_DEVICE_ID_AMD_19H_DF_F0	0x1650
 #define PCI_DEVICE_ID_AMD_19H_DF_F6	0x1656
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
 
 /*
  * Function 1 - Address Map
@@ -298,6 +302,8 @@ enum amd_families {
 	F17_M60H_CPUS,
 	F17_M70H_CPUS,
 	F19_CPUS,
+	F19_M10H_CPUS,
+	F19_M50H_CPUS,
 	NUM_FAMILIES,
 };
@@ -66,14 +66,12 @@ unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
 	char *p = buf;
 
 	for (i = 0; i < mci->n_layers; i++) {
-		n = snprintf(p, len, "%s %d ",
+		n = scnprintf(p, len, "%s %d ",
 			     edac_layer_name[mci->layers[i].type],
 			     dimm->location[i]);
 		p += n;
 		len -= n;
 		count += n;
-		if (!len)
-			break;
 	}
 
 	return count;
@@ -164,6 +162,8 @@ const char * const edac_mem_types[] = {
 	[MEM_LPDDR4]	= "Low-Power-DDR4-RAM",
 	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
 	[MEM_DDR5]	= "Unbuffered-DDR5",
+	[MEM_RDDR5]	= "Registered-DDR5",
+	[MEM_LRDDR5]	= "Load-Reduced-DDR5-RAM",
 	[MEM_NVDIMM]	= "Non-volatile-RAM",
 	[MEM_WIO2]	= "Wide-IO-2",
 	[MEM_HBM2]	= "High-bandwidth-memory-Gen2",
@@ -341,19 +341,16 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
 		 */
 		len = sizeof(dimm->label);
 		p = dimm->label;
-		n = snprintf(p, len, "mc#%u", mci->mc_idx);
+		n = scnprintf(p, len, "mc#%u", mci->mc_idx);
 		p += n;
 		len -= n;
 		for (layer = 0; layer < mci->n_layers; layer++) {
-			n = snprintf(p, len, "%s#%u",
+			n = scnprintf(p, len, "%s#%u",
 				     edac_layer_name[mci->layers[layer].type],
 				     pos[layer]);
 			p += n;
 			len -= n;
 			dimm->location[layer] = pos[layer];
-
-			if (len <= 0)
-				break;
 		}
 
 		/* Link it to the csrows old API data */
@@ -1027,12 +1024,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 			  const char *other_detail)
 {
 	struct dimm_info *dimm;
-	char *p;
+	char *p, *end;
 	int row = -1, chan = -1;
 	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
 	int i, n_labels = 0;
 	struct edac_raw_error_desc *e = &mci->error_desc;
 	bool any_memory = true;
+	const char *prefix;
 
 	edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1087,6 +1085,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 	 */
 	p = e->label;
 	*p = '\0';
+	end = p + sizeof(e->label);
+	prefix = "";
 
 	mci_for_each_dimm(mci, dimm) {
 		if (top_layer >= 0 && top_layer != dimm->location[0])
@@ -1114,12 +1114,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 			p = e->label;
 			*p = '\0';
 		} else {
-			if (p != e->label) {
-				strcpy(p, OTHER_LABEL);
-				p += strlen(OTHER_LABEL);
-			}
-			strcpy(p, dimm->label);
-			p += strlen(p);
+			p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
+			prefix = OTHER_LABEL;
 		}
 
 	/*
@@ -1141,25 +1137,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 	}
 
 	if (any_memory)
-		strcpy(e->label, "any memory");
+		strscpy(e->label, "any memory", sizeof(e->label));
 	else if (!*e->label)
-		strcpy(e->label, "unknown memory");
+		strscpy(e->label, "unknown memory", sizeof(e->label));
 
 	edac_inc_csrow(e, row, chan);
 
 	/* Fill the RAM location data */
 	p = e->location;
+	end = p + sizeof(e->location);
+	prefix = "";
 
 	for (i = 0; i < mci->n_layers; i++) {
 		if (pos[i] < 0)
 			continue;
 
-		p += sprintf(p, "%s:%d ",
-			     edac_layer_name[mci->layers[i].type],
-			     pos[i]);
+		p += scnprintf(p, end - p, "%s%s:%d", prefix,
+			       edac_layer_name[mci->layers[i].type], pos[i]);
+		prefix = " ";
 	}
-	if (p > e->location)
-		*(p - 1) = '\0';
 
 	edac_raw_mc_handle_error(e);
 }
@@ -399,6 +399,63 @@ static const char * const smca_mp5_mce_desc[] = {
 	"Instruction Tag Cache Bank B ECC or parity error",
 };
 
+static const char * const smca_mpdma_mce_desc[] = {
+	"Main SRAM [31:0] bank ECC or parity error",
+	"Main SRAM [63:32] bank ECC or parity error",
+	"Main SRAM [95:64] bank ECC or parity error",
+	"Main SRAM [127:96] bank ECC or parity error",
+	"Data Cache Bank A ECC or parity error",
+	"Data Cache Bank B ECC or parity error",
+	"Data Tag Cache Bank A ECC or parity error",
+	"Data Tag Cache Bank B ECC or parity error",
+	"Instruction Cache Bank A ECC or parity error",
+	"Instruction Cache Bank B ECC or parity error",
+	"Instruction Tag Cache Bank A ECC or parity error",
+	"Instruction Tag Cache Bank B ECC or parity error",
+	"Data Cache Bank A ECC or parity error",
+	"Data Cache Bank B ECC or parity error",
+	"Data Tag Cache Bank A ECC or parity error",
+	"Data Tag Cache Bank B ECC or parity error",
+	"Instruction Cache Bank A ECC or parity error",
+	"Instruction Cache Bank B ECC or parity error",
+	"Instruction Tag Cache Bank A ECC or parity error",
+	"Instruction Tag Cache Bank B ECC or parity error",
+	"Data Cache Bank A ECC or parity error",
+	"Data Cache Bank B ECC or parity error",
+	"Data Tag Cache Bank A ECC or parity error",
+	"Data Tag Cache Bank B ECC or parity error",
+	"Instruction Cache Bank A ECC or parity error",
+	"Instruction Cache Bank B ECC or parity error",
+	"Instruction Tag Cache Bank A ECC or parity error",
+	"Instruction Tag Cache Bank B ECC or parity error",
+	"System Hub Read Buffer ECC or parity error",
+	"MPDMA TVF DVSEC Memory ECC or parity error",
+	"MPDMA TVF MMIO Mailbox0 ECC or parity error",
+	"MPDMA TVF MMIO Mailbox1 ECC or parity error",
+	"MPDMA TVF Doorbell Memory ECC or parity error",
+	"MPDMA TVF SDP Slave Memory 0 ECC or parity error",
+	"MPDMA TVF SDP Slave Memory 1 ECC or parity error",
+	"MPDMA TVF SDP Slave Memory 2 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 0 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 1 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 2 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 3 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 4 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 5 ECC or parity error",
+	"MPDMA TVF SDP Master Memory 6 ECC or parity error",
+	"MPDMA PTE Command FIFO ECC or parity error",
+	"MPDMA PTE Hub Data FIFO ECC or parity error",
+	"MPDMA PTE Internal Data FIFO ECC or parity error",
+	"MPDMA PTE Command Memory DMA ECC or parity error",
+	"MPDMA PTE Command Memory Internal ECC or parity error",
+	"MPDMA PTE DMA Completion FIFO ECC or parity error",
+	"MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
+	"MPDMA PTE Descriptor Completion FIFO ECC or parity error",
+	"MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
+	"MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
+	"SDP Watchdog Timer expired",
+};
+
 static const char * const smca_nbio_mce_desc[] = {
 	"ECC or Parity error",
 	"PCIE error",
@@ -448,7 +505,7 @@ static const char * const smca_xgmipcs_mce_desc[] = {
 	"Rx Replay Timeout Error",
 	"LinkSub Tx Timeout Error",
 	"LinkSub Rx Timeout Error",
-	"Rx CMD Pocket Error",
+	"Rx CMD Packet Error",
 };
 
 static const char * const smca_xgmiphy_mce_desc[] = {
@@ -458,11 +515,66 @@ static const char * const smca_xgmiphy_mce_desc[] = {
 	"PHY APB error",
 };
 
-static const char * const smca_waflphy_mce_desc[] = {
-	"RAM ECC Error",
-	"ARC instruction buffer parity error",
-	"ARC data buffer parity error",
-	"PHY APB error",
+static const char * const smca_nbif_mce_desc[] = {
+	"Timeout error from GMI",
+	"SRAM ECC error",
+	"NTB Error Event",
+	"SDP Parity error",
+};
+
+static const char * const smca_sata_mce_desc[] = {
+	"Parity error for port 0",
+	"Parity error for port 1",
+	"Parity error for port 2",
+	"Parity error for port 3",
+	"Parity error for port 4",
+	"Parity error for port 5",
+	"Parity error for port 6",
+	"Parity error for port 7",
+};
+
+static const char * const smca_usb_mce_desc[] = {
+	"Parity error or ECC error for S0 RAM0",
+	"Parity error or ECC error for S0 RAM1",
+	"Parity error or ECC error for S0 RAM2",
+	"Parity error for PHY RAM0",
+	"Parity error for PHY RAM1",
+	"AXI Slave Response error",
+};
+
+static const char * const smca_gmipcs_mce_desc[] = {
+	"Data Loss Error",
+	"Training Error",
+	"Replay Parity Error",
+	"Rx Fifo Underflow Error",
+	"Rx Fifo Overflow Error",
+	"CRC Error",
+	"BER Exceeded Error",
+	"Tx Fifo Underflow Error",
+	"Replay Buffer Parity Error",
+	"Tx Overflow Error",
+	"Replay Fifo Overflow Error",
+	"Replay Fifo Underflow Error",
+	"Elastic Fifo Overflow Error",
+	"Deskew Error",
+	"Offline Error",
+	"Data Startup Limit Error",
+	"FC Init Timeout Error",
+	"Recovery Timeout Error",
+	"Ready Serial Timeout Error",
+	"Ready Serial Attempt Error",
+	"Recovery Attempt Error",
+	"Recovery Relock Attempt Error",
+	"Deskew Abort Error",
+	"Rx Buffer Error",
+	"Rx LFDS Fifo Overflow Error",
+	"Rx LFDS Fifo Underflow Error",
+	"LinkSub Tx Timeout Error",
+	"LinkSub Rx Timeout Error",
+	"Rx CMD Packet Error",
+	"LFDS Training Timeout Error",
+	"LFDS FC Init Timeout Error",
+	"Data Loss Error",
 };
 
 struct smca_mce_desc {
@@ -490,12 +602,21 @@ static struct smca_mce_desc smca_mce_descs[] = {
 	[SMCA_SMU]	= { smca_smu_mce_desc,	ARRAY_SIZE(smca_smu_mce_desc)	},
 	[SMCA_SMU_V2]	= { smca_smu2_mce_desc,	ARRAY_SIZE(smca_smu2_mce_desc)	},
 	[SMCA_MP5]	= { smca_mp5_mce_desc,	ARRAY_SIZE(smca_mp5_mce_desc)	},
+	[SMCA_MPDMA]	= { smca_mpdma_mce_desc,   ARRAY_SIZE(smca_mpdma_mce_desc)	},
 	[SMCA_NBIO]	= { smca_nbio_mce_desc,	ARRAY_SIZE(smca_nbio_mce_desc)	},
 	[SMCA_PCIE]	= { smca_pcie_mce_desc,	ARRAY_SIZE(smca_pcie_mce_desc)	},
 	[SMCA_PCIE_V2]	= { smca_pcie2_mce_desc,   ARRAY_SIZE(smca_pcie2_mce_desc)	},
 	[SMCA_XGMI_PCS]	= { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc)	},
+	/* NBIF and SHUB have the same error descriptions, for now. */
+	[SMCA_NBIF]	= { smca_nbif_mce_desc,	ARRAY_SIZE(smca_nbif_mce_desc)	},
+	[SMCA_SHUB]	= { smca_nbif_mce_desc,	ARRAY_SIZE(smca_nbif_mce_desc)	},
+	[SMCA_SATA]	= { smca_sata_mce_desc,	ARRAY_SIZE(smca_sata_mce_desc)	},
+	[SMCA_USB]	= { smca_usb_mce_desc,	ARRAY_SIZE(smca_usb_mce_desc)	},
+	[SMCA_GMI_PCS]	= { smca_gmipcs_mce_desc,  ARRAY_SIZE(smca_gmipcs_mce_desc)	},
+	/* All the PHY bank types have the same error descriptions, for now. */
 	[SMCA_XGMI_PHY]	= { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc)	},
-	[SMCA_WAFL_PHY]	= { smca_waflphy_mce_desc, ARRAY_SIZE(smca_waflphy_mce_desc)	},
+	[SMCA_WAFL_PHY]	= { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc)	},
+	[SMCA_GMI_PHY]	= { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc)	},
 };
 
 static bool f12h_mc0_mce(u16 ec, u8 xec)
@@ -1045,20 +1166,13 @@ static void decode_mc6_mce(struct mce *m)
 /* Decode errors according to Scalable MCA specification */
 static void decode_smca_error(struct mce *m)
 {
-	struct smca_hwid *hwid;
-	enum smca_bank_types bank_type;
+	enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
 	const char *ip_name;
 	u8 xec = XEC(m->status, xec_mask);
 
-	if (m->bank >= ARRAY_SIZE(smca_banks))
+	if (bank_type >= N_SMCA_BANK_TYPES)
 		return;
 
-	hwid = smca_banks[m->bank].hwid;
-	if (!hwid)
-		return;
-
-	bank_type = hwid->bank_type;
-
 	if (bank_type == SMCA_RESERVED) {
 		pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
 		return;
@@ -3439,7 +3439,7 @@ MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
 
 static int sbridge_probe(const struct x86_cpu_id *id)
 {
-	int rc = -ENODEV;
+	int rc;
 	u8 mc, num_mc = 0;
 	struct sbridge_dev *sbridge_dev;
 	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
@ -19,7 +19,7 @@ struct sifive_edac_priv {
|
|||||||
struct edac_device_ctl_info *dci;
|
 	struct edac_device_ctl_info *dci;
 };

-/**
+/*
  * EDAC error callback
  *
  * @event: non-zero if unrecoverable.
@@ -101,6 +101,7 @@
 /* DDR ECC Quirks */
 #define DDR_ECC_INTR_SUPPORT		BIT(0)
 #define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
+#define DDR_ECC_INTR_SELF_CLEAR		BIT(2)

 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
 /* ECC Configuration Registers */
@@ -171,6 +172,10 @@
 #define DDR_QOS_IRQ_EN_OFST	0x20208
 #define DDR_QOS_IRQ_DB_OFST	0x2020C

+/* DDR QOS Interrupt register definitions */
+#define DDR_UE_MASK		BIT(9)
+#define DDR_CE_MASK		BIT(8)
+
 /* ECC Corrected Error Register Mask and Shifts*/
 #define ECC_CEADDR0_RW_MASK	0x3FFFF
 #define ECC_CEADDR0_RNK_MASK	BIT(24)
@@ -533,10 +538,16 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
 	priv = mci->pvt_info;
 	p_data = priv->p_data;

-	regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
-	regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
-	if (!(regval & ECC_CE_UE_INTR_MASK))
-		return IRQ_NONE;
+	/*
+	 * v3.0 of the controller has the ce/ue bits cleared automatically,
+	 * so this condition does not apply.
+	 */
+	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
+		if (!(regval & ECC_CE_UE_INTR_MASK))
+			return IRQ_NONE;
+	}

 	status = p_data->get_error_info(priv);
 	if (status)
@@ -548,7 +559,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)

 	edac_dbg(3, "Total error count CE %d UE %d\n",
 		 priv->ce_cnt, priv->ue_cnt);
-	writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+	/* v3.0 of the controller does not have this register */
+	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
+		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
 	return IRQ_HANDLED;
 }

@@ -834,8 +847,13 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
 static void enable_intr(struct synps_edac_priv *priv)
 {
 	/* Enable UE/CE Interrupts */
-	writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
-	       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+		writel(DDR_UE_MASK | DDR_CE_MASK,
+		       priv->baseaddr + ECC_CLR_OFST);
+	else
+		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
 }

 static void disable_intr(struct synps_edac_priv *priv)
@@ -890,6 +908,19 @@ static const struct synps_platform_data zynqmp_edac_def = {
 	),
 };

+static const struct synps_platform_data synopsys_edac_def = {
+	.get_error_info	= zynqmp_get_error_info,
+	.get_mtype	= zynqmp_get_mtype,
+	.get_dtype	= zynqmp_get_dtype,
+	.get_ecc_state	= zynqmp_get_ecc_state,
+	.quirks		= (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
+#ifdef CONFIG_EDAC_DEBUG
+			  | DDR_ECC_DATA_POISON_SUPPORT
+#endif
+			  ),
+};
+

 static const struct of_device_id synps_edac_match[] = {
 	{
 		.compatible = "xlnx,zynq-ddrc-a05",
@@ -899,6 +930,10 @@ static const struct of_device_id synps_edac_match[] = {
 		.compatible = "xlnx,zynqmp-ddrc-2.40a",
 		.data = (void *)&zynqmp_edac_def
 	},
+	{
+		.compatible = "snps,ddrc-3.80a",
+		.data = (void *)&synopsys_edac_def
+	},
 	{
 		/* end of table */
 	}
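Editor's note: the hunks above key the interrupt path on a DDR_ECC_INTR_SELF_CLEAR quirk bit instead of a controller-version check. Below is a minimal standalone sketch of that quirk-flag gating pattern; all names (QUIRK_*, fake_controller, handle_irq) are illustrative stand-ins for the driver's symbols, and printf stands in for the register accesses.

/* Standalone sketch of quirk-flag gating, compiled as ordinary C. */
#include <stdio.h>

#define QUIRK_INTR_SUPPORT	(1u << 0)
#define QUIRK_INTR_SELF_CLEAR	(1u << 2)

struct fake_controller {
	unsigned int quirks;
};

static void handle_irq(struct fake_controller *c)
{
	/* Controllers that clear CE/UE status themselves skip the
	 * manual read-and-check of the status register. */
	if (!(c->quirks & QUIRK_INTR_SELF_CLEAR))
		printf("read status register, bail out if no CE/UE bit\n");

	printf("count and report errors\n");

	/* ...and also skip the manual write-back that acks the IRQ. */
	if (!(c->quirks & QUIRK_INTR_SELF_CLEAR))
		printf("write status back to clear the interrupt\n");
}

int main(void)
{
	struct fake_controller v2 = { .quirks = QUIRK_INTR_SUPPORT };
	struct fake_controller v3 = { .quirks = QUIRK_INTR_SUPPORT |
					        QUIRK_INTR_SELF_CLEAR };

	handle_irq(&v2);	/* takes both manual steps */
	handle_irq(&v3);	/* hardware self-clears, both skipped */
	return 0;
}
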
@@ -245,11 +245,8 @@ static int ti_edac_probe(struct platform_device *pdev)

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	reg = devm_ioremap_resource(dev, res);
-	if (IS_ERR(reg)) {
-		edac_printk(KERN_ERR, EDAC_MOD_NAME,
-			    "EMIF controller regs not defined\n");
+	if (IS_ERR(reg))
 		return PTR_ERR(reg);
-	}

 	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
 	layers[0].size = 1;
@@ -281,8 +278,6 @@ static int ti_edac_probe(struct platform_device *pdev)
 	error_irq = platform_get_irq(pdev, 0);
 	if (error_irq < 0) {
 		ret = error_irq;
-		edac_printk(KERN_ERR, EDAC_MOD_NAME,
-			    "EMIF irq number not defined.\n");
 		goto err;
 	}

@@ -10,6 +10,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
@@ -953,11 +954,25 @@ static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
 		return DMA_FROM_DEVICE;
 }

+static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
+						fw_iso_mc_callback_t callback,
+						void *callback_data)
+{
+	struct fw_iso_context *ctx;
+
+	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
+				    0, 0, 0, NULL, callback_data);
+	if (!IS_ERR(ctx))
+		ctx->callback.mc = callback;
+
+	return ctx;
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
-	fw_iso_callback_t cb;
+	union fw_iso_callback cb;
 	int ret;

 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
@@ -970,7 +985,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 		if (a->speed > SCODE_3200 || a->channel > 63)
 			return -EINVAL;

-		cb = iso_callback;
+		cb.sc = iso_callback;
 		break;

 	case FW_ISO_CONTEXT_RECEIVE:
@@ -978,19 +993,24 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 		    a->channel > 63)
 			return -EINVAL;

-		cb = iso_callback;
+		cb.sc = iso_callback;
 		break;

 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
-		cb = (fw_iso_callback_t)iso_mc_callback;
+		cb.mc = iso_mc_callback;
 		break;

 	default:
 		return -EINVAL;
 	}

-	context = fw_iso_context_create(client->device->card, a->type,
-			a->channel, a->speed, a->header_size, cb, client);
+	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+		context = fw_iso_mc_context_create(client->device->card, cb.mc,
+						   client);
+	else
+		context = fw_iso_context_create(client->device->card, a->type,
+						a->channel, a->speed,
+						a->header_size, cb.sc, client);
 	if (IS_ERR(context))
 		return PTR_ERR(context);
 	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
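The change above stops casting iso_mc_callback to an incompatible function-pointer type and instead keeps both callback flavours in union fw_iso_callback, picking the right member per context type. Here is a small userspace sketch of the same idea; the callback types and names are illustrative only.

/* Standalone sketch: a union of function pointers instead of a cast. */
#include <stdio.h>

typedef void (*single_cb)(int channel);
typedef void (*multi_cb)(const int *channels, int n);

union iso_callback {
	single_cb sc;
	multi_cb mc;
};

static void on_single(int channel) { printf("channel %d\n", channel); }
static void on_multi(const int *ch, int n) { printf("%d channels, first %d\n", n, ch[0]); }

int main(void)
{
	union iso_callback cb;
	int channels[2] = { 1, 2 };

	cb.sc = on_single;	/* no cast needed; */
	cb.sc(5);
	cb.mc = on_multi;	/* each member keeps its own prototype */
	cb.mc(channels, 2);
	return 0;
}
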
@@ -1443,8 +1443,8 @@ static int fwnet_probe(struct fw_unit *unit,
 	struct net_device *net;
 	bool allocated_netdev = false;
 	struct fwnet_device *dev;
+	union fwnet_hwaddr ha;
 	int ret;
-	union fwnet_hwaddr *ha;

 	mutex_lock(&fwnet_device_mutex);

@@ -1491,12 +1491,12 @@ static int fwnet_probe(struct fw_unit *unit,
 	net->max_mtu = 4096U;

 	/* Set our hardware address while we're at it */
-	ha = (union fwnet_hwaddr *)net->dev_addr;
-	put_unaligned_be64(card->guid, &ha->uc.uniq_id);
-	ha->uc.max_rec = dev->card->max_receive;
-	ha->uc.sspd = dev->card->link_speed;
-	put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
-	put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+	ha.uc.uniq_id = cpu_to_be64(card->guid);
+	ha.uc.max_rec = dev->card->max_receive;
+	ha.uc.sspd = dev->card->link_speed;
+	ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
+	ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+	dev_addr_set(net, ha.u);

 	memset(net->broadcast, -1, net->addr_len);

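The rewrite above builds the hardware address in a stack-local union and installs it with dev_addr_set() instead of writing through a pointer aimed into net->dev_addr. A rough standalone sketch of that build-then-commit pattern follows; fake_netdev and fake_dev_addr_set are hypothetical stand-ins, not kernel API.

/* Standalone sketch: fill a local buffer, then commit in one call. */
#include <stdio.h>
#include <string.h>

struct fake_netdev {
	unsigned char dev_addr[16];	/* const-protected in the real kernel */
};

static void fake_dev_addr_set(struct fake_netdev *net, const unsigned char *addr)
{
	/* single choke point for address updates */
	memcpy(net->dev_addr, addr, sizeof(net->dev_addr));
}

int main(void)
{
	struct fake_netdev net;
	unsigned char ha[16] = { 0 };

	ha[0] = 0xaa;			/* assemble the fields locally... */
	ha[1] = 0x55;
	fake_dev_addr_set(&net, ha);	/* ...then install them atomically */
	printf("%02x:%02x\n", net.dev_addr[0], net.dev_addr[1]);
	return 0;
}
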
@@ -1375,7 +1375,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 	sbp2_unmap_scatterlist(device->card->device, orb);

 	orb->cmd->result = result;
-	orb->cmd->scsi_done(orb->cmd);
+	scsi_done(orb->cmd);
 }

 static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@@ -1578,11 +1578,13 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,

 static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

-static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
-	&dev_attr_ieee1394_id,
+static struct attribute *sbp2_scsi_sysfs_attrs[] = {
+	&dev_attr_ieee1394_id.attr,
 	NULL
 };

+ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
+
 static struct scsi_host_template scsi_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= "SBP-2 IEEE-1394",
@@ -1595,7 +1597,7 @@ static struct scsi_host_template scsi_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.max_segment_size	= SBP2_MAX_SEG_SIZE,
 	.can_queue		= 1,
-	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
+	.sdev_groups		= sbp2_scsi_sysfs_groups,
 };

 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
@@ -81,7 +81,6 @@ config WIREGUARD
 	select CRYPTO
 	select CRYPTO_LIB_CURVE25519
 	select CRYPTO_LIB_CHACHA20POLY1305
-	select CRYPTO_LIB_BLAKE2S
 	select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
 	select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
 	select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
@@ -291,6 +290,23 @@ config GTP
 	  To compile this drivers as a module, choose M here: the module
 	  will be called gtp.

+config AMT
+	tristate "Automatic Multicast Tunneling (AMT)"
+	depends on INET && IP_MULTICAST
+	depends on IPV6 || !IPV6
+	select NET_UDP_TUNNEL
+	help
+	  This allows one to create AMT(Automatic Multicast Tunneling)
+	  virtual interfaces that provide multicast tunneling.
+	  There are two roles, Gateway, and Relay.
+	  Gateway Encapsulates IGMP/MLD traffic from listeners to the Relay.
+	  Gateway Decapsulates multicast traffic from the Relay to Listeners.
+	  Relay Encapsulates multicast traffic from Sources to Gateway.
+	  Relay Decapsulates IGMP/MLD traffic from Gateway.
+
+	  To compile this drivers as a module, choose M here: the module
+	  will be called amt.
+
 config MACSEC
 	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
 	select CRYPTO
@@ -550,9 +566,7 @@ config XEN_NETDEV_BACKEND
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
 	depends on PCI && INET
-	depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
-		     IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
-		     PPC_64K_PAGES)
+	depends on PAGE_SIZE_LESS_THAN_64KB
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
@@ -14,6 +14,7 @@ obj-$(CONFIG_WIREGUARD) += wireguard/
 obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_MACSEC) += macsec.o
+obj-$(CONFIG_AMT) += amt.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
 obj-$(CONFIG_MACVTAP) += macvtap.o
 obj-$(CONFIG_MII) += mii.o
@@ -38,6 +38,13 @@ struct bareudp_net {
 	struct list_head        bareudp_list;
 };

+struct bareudp_conf {
+	__be16 ethertype;
+	__be16 port;
+	u16 sport_min;
+	bool multi_proto_mode;
+};
+
 /* Pseudo network device */
 struct bareudp_dev {
 	struct net       *net;        /* netns for packet i/o */
@@ -577,11 +584,8 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
 		return -EINVAL;
 	}

-	if (data[IFLA_BAREUDP_PORT])
-		conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
-
-	if (data[IFLA_BAREUDP_ETHERTYPE])
-		conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+	conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+	conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);

 	if (data[IFLA_BAREUDP_SRCPORT_MIN])
 		conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
@@ -605,7 +609,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
 }

 static int bareudp_configure(struct net *net, struct net_device *dev,
-			     struct bareudp_conf *conf)
+			     struct bareudp_conf *conf,
+			     struct netlink_ext_ack *extack)
 {
 	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
 	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
@@ -614,13 +619,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev,
 	bareudp->net = net;
 	bareudp->dev = dev;
 	t = bareudp_find_dev(bn, conf);
-	if (t)
+	if (t) {
+		NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
 		return -EBUSY;
+	}

 	if (conf->multi_proto_mode &&
 	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
-	     conf->ethertype != htons(ETH_P_IP)))
+	     conf->ethertype != htons(ETH_P_IP))) {
+		NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
 		return -EINVAL;
+	}

 	bareudp->port = conf->port;
 	bareudp->ethertype = conf->ethertype;
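The extack parameter threaded through bareudp_configure() above lets a validation failure carry a human-readable message back to the netlink caller instead of a bare errno. A minimal sketch of that reporting pattern, with illustrative stand-in types in place of struct netlink_ext_ack:

/* Standalone sketch: an "extended ack" carrying an error string. */
#include <stdio.h>

struct fake_extack {
	const char *msg;
};

#define FAKE_SET_ERR_MSG(ea, m)	((ea)->msg = (m))

static int configure(int port_in_use, struct fake_extack *extack)
{
	if (port_in_use) {
		/* attach a message alongside the error code */
		FAKE_SET_ERR_MSG(extack, "device using the same port already exists");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct fake_extack ea = { 0 };

	if (configure(1, &ea))
		printf("error: %s\n", ea.msg);
	return 0;
}
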
@@ -667,7 +676,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
 	if (err)
 		return err;

-	err = bareudp_configure(net, dev, &conf);
+	err = bareudp_configure(net, dev, &conf, extack);
 	if (err)
 		return err;

@@ -724,40 +733,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
 	.fill_info      = bareudp_fill_info,
 };

-struct net_device *bareudp_dev_create(struct net *net, const char *name,
-				      u8 name_assign_type,
-				      struct bareudp_conf *conf)
-{
-	struct nlattr *tb[IFLA_MAX + 1];
-	struct net_device *dev;
-	int err;
-
-	memset(tb, 0, sizeof(tb));
-	dev = rtnl_create_link(net, name, name_assign_type,
-			       &bareudp_link_ops, tb, NULL);
-	if (IS_ERR(dev))
-		return dev;
-
-	err = bareudp_configure(net, dev, conf);
-	if (err) {
-		free_netdev(dev);
-		return ERR_PTR(err);
-	}
-	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
-	if (err)
-		goto err;
-
-	err = rtnl_configure_link(dev, NULL);
-	if (err < 0)
-		goto err;
-
-	return dev;
-err:
-	bareudp_dellink(dev, NULL);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(bareudp_dev_create);
-
 static __net_init int bareudp_init_net(struct net *net)
 {
 	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
@@ -71,6 +71,7 @@ config NET_DSA_QCA8K
 config NET_DSA_REALTEK_SMI
 	tristate "Realtek SMI Ethernet switch family support"
 	select NET_DSA_TAG_RTL4_A
+	select NET_DSA_TAG_RTL8_4
 	select FIXED_PHY
 	select IRQ_DOMAIN
 	select REALTEK_PHY
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
 obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
+realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
@@ -62,6 +62,38 @@ static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
 	return REG_SWITCH_STATUS;
 }

+static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
+{
+	switch (port) {
+	case 0:
+		return REG_LED_0_CNTRL;
+	case 1:
+		return REG_LED_1_CNTRL;
+	case 2:
+		return REG_LED_2_CNTRL;
+	}
+
+	switch (priv->type) {
+	case BCM4908_DEVICE_ID:
+		switch (port) {
+		case 3:
+			return REG_LED_3_CNTRL;
+		case 7:
+			return REG_LED_4_CNTRL;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	WARN_ONCE(1, "Unsupported port %d\n", port);
+
+	/* RO fallback reg */
+	return REG_SWITCH_STATUS;
+}
+
 /* Return the number of active ports, not counting the IMP (CPU) port */
 static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
 {
@@ -187,9 +219,14 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)

 	/* Use PHY-driven LED signaling */
 	if (!enable) {
-		reg = reg_readl(priv, REG_LED_CNTRL(0));
-		reg |= SPDLNK_SRC_SEL;
-		reg_writel(priv, reg, REG_LED_CNTRL(0));
+		u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
+
+		if (priv->type == BCM7278_DEVICE_ID ||
+		    priv->type == BCM7445_DEVICE_ID) {
+			reg = reg_led_readl(priv, led_ctrl, 0);
+			reg |= LED_CNTRL_SPDLNK_SRC_SEL;
+			reg_led_writel(priv, reg, led_ctrl, 0);
+		}
 	}
 }

@@ -670,7 +707,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
 	if (priv->int_phy_mask & BIT(port))
 		return priv->hw_params.gphy_rev;
 	else
-		return 0;
+		return PHY_BRCM_AUTO_PWRDWN_ENABLE |
+		       PHY_BRCM_DIS_TXCRXC_NOENRGY |
+		       PHY_BRCM_IDDQ_SUSPEND;
 }

 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
@@ -686,7 +725,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
 	    state->interface != PHY_INTERFACE_MODE_GMII &&
 	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
 	    state->interface != PHY_INTERFACE_MODE_MOCA) {
-		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		linkmode_zero(supported);
 		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
 			dev_err(ds->dev,
 				"Unsupported interface: %d for port %d\n",
@@ -714,10 +753,8 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
 	phylink_set(mask, 100baseT_Half);
 	phylink_set(mask, 100baseT_Full);

-	bitmap_and(supported, supported, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
-	bitmap_and(state->advertising, state->advertising, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
 }

 static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
@@ -1235,9 +1272,14 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
 	[REG_SPHY_CNTRL]	= 0x24,
 	[REG_CROSSBAR]		= 0xc8,
 	[REG_RGMII_11_CNTRL]	= 0x014c,
 	[REG_LED_0_CNTRL]	= 0x40,
 	[REG_LED_1_CNTRL]	= 0x4c,
 	[REG_LED_2_CNTRL]	= 0x58,
+	[REG_LED_3_CNTRL]	= 0x64,
+	[REG_LED_4_CNTRL]	= 0x88,
+	[REG_LED_5_CNTRL]	= 0xa0,
+	[REG_LED_AGGREGATE_CTRL] = 0xb8,
+
 };

 static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
@@ -210,6 +210,16 @@ SF2_IO_MACRO(acb);
 SWITCH_INTR_L2(0);
 SWITCH_INTR_L2(1);

+static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
+{
+	return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
+}
+
+static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
+{
+	writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
+}
+
 /* RXNFC */
 int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
 		      struct ethtool_rxnfc *nfc, u32 *rule_locs);
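The reg_led_readl()/reg_led_writel() helpers above resolve an enum register index through a per-SoC offset table before touching the hardware, which is what lets one accessor cover layouts where the LED registers moved. A standalone sketch of that table-driven indirection; the enum values and offsets here are illustrative, not the driver's.

/* Standalone sketch: enum register names resolved via an offset table. */
#include <stdio.h>

enum reg { REG_LED_0, REG_LED_1, REG_MAX };

static const unsigned short offsets_variant_a[REG_MAX] = { 0x40, 0x4c };
static const unsigned short offsets_variant_b[REG_MAX] = { 0x90, 0xa4 };

static unsigned int reg_offset(const unsigned short *offsets, enum reg r)
{
	/* a real driver would read at mmio_base + offsets[r] */
	return offsets[r];
}

int main(void)
{
	printf("LED1 at 0x%x on variant A, 0x%x on variant B\n",
	       reg_offset(offsets_variant_a, REG_LED_1),
	       reg_offset(offsets_variant_b, REG_LED_1));
	return 0;
}
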
@@ -25,6 +25,10 @@ enum bcm_sf2_reg_offs {
 	REG_LED_0_CNTRL,
 	REG_LED_1_CNTRL,
 	REG_LED_2_CNTRL,
+	REG_LED_3_CNTRL,
+	REG_LED_4_CNTRL,
+	REG_LED_5_CNTRL,
+	REG_LED_AGGREGATE_CTRL,
 	REG_SWITCH_REG_MAX,
 };

@@ -56,6 +60,63 @@ enum bcm_sf2_reg_offs {
 #define  CROSSBAR_BCM4908_EXT_GPHY4	1
 #define  CROSSBAR_BCM4908_EXT_RGMII	2

+/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */
+#define  LED_CNTRL_NO_LINK_ENCODE_SHIFT		0
+#define  LED_CNTRL_M10_ENCODE_SHIFT		2
+#define  LED_CNTRL_M100_ENCODE_SHIFT		4
+#define  LED_CNTRL_M1000_ENCODE_SHIFT		6
+#define  LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT	8
+#define  LED_CNTRL_SEL_10M_ENCODE_SHIFT		10
+#define  LED_CNTRL_SEL_100M_ENCODE_SHIFT	12
+#define  LED_CNTRL_SEL_1000M_ENCODE_SHIFT	14
+#define  LED_CNTRL_RX_DV_EN			(1 << 16)
+#define  LED_CNTRL_TX_EN_EN			(1 << 17)
+#define  LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT	18
+#define  LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT	20
+#define  LED_CNTRL_ACT_LED_ACT_SEL_SHIFT	22
+#define  LED_CNTRL_SPDLNK_SRC_SEL		(1 << 24)
+#define  LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL	(1 << 25)
+#define  LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL	(1 << 26)
+#define  LED_CNTRL_ACT_LED_POL_SEL		(1 << 27)
+#define  LED_CNTRL_MASK				0x3
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_CTRL				0x0
+#define  LED_CTRL_RX_ACT_EN			0x00000001
+#define  LED_CTRL_TX_ACT_EN			0x00000002
+#define  LED_CTRL_SPDLNK_LED0_ACT_SEL		0x00000004
+#define  LED_CTRL_SPDLNK_LED1_ACT_SEL		0x00000008
+#define  LED_CTRL_SPDLNK_LED2_ACT_SEL		0x00000010
+#define  LED_CTRL_ACT_LED_ACT_SEL		0x00000020
+#define  LED_CTRL_SPDLNK_LED0_ACT_POL_SEL	0x00000040
+#define  LED_CTRL_SPDLNK_LED1_ACT_POL_SEL	0x00000080
+#define  LED_CTRL_SPDLNK_LED2_ACT_POL_SEL	0x00000100
+#define  LED_CTRL_ACT_LED_POL_SEL		0x00000200
+#define  LED_CTRL_LED_SPD_OVRD			0x00001c00
+#define  LED_CTRL_LNK_STATUS_OVRD		0x00002000
+#define  LED_CTRL_SPD_OVRD_EN			0x00004000
+#define  LED_CTRL_LNK_OVRD_EN			0x00008000
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC_SEL		0x4
+#define  LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT	0
+#define  LED_LINK_SPEED_ENC_SEL_10M_SHIFT	3
+#define  LED_LINK_SPEED_ENC_SEL_100M_SHIFT	6
+#define  LED_LINK_SPEED_ENC_SEL_1000M_SHIFT	9
+#define  LED_LINK_SPEED_ENC_SEL_2500M_SHIFT	12
+#define  LED_LINK_SPEED_ENC_SEL_10G_SHIFT	15
+#define  LED_LINK_SPEED_ENC_SEL_MASK		0x7
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC			0x8
+#define  LED_LINK_SPEED_ENC_NO_LINK_SHIFT	0
+#define  LED_LINK_SPEED_ENC_M10_SHIFT		3
+#define  LED_LINK_SPEED_ENC_M100_SHIFT		6
+#define  LED_LINK_SPEED_ENC_M1000_SHIFT		9
+#define  LED_LINK_SPEED_ENC_M2500_SHIFT		12
+#define  LED_LINK_SPEED_ENC_M10G_SHIFT		15
+#define  LED_LINK_SPEED_ENC_MASK		0x7
+
 /* Relative to REG_RGMII_CNTRL */
 #define  RGMII_MODE_EN			(1 << 0)
 #define  ID_MODE_DIS			(1 << 1)
@@ -73,10 +134,6 @@ enum bcm_sf2_reg_offs {
 #define  LPI_COUNT_SHIFT		9
 #define  LPI_COUNT_MASK			0x3F

-#define REG_LED_CNTRL(x)		(REG_LED_0_CNTRL + (x))
-
-#define  SPDLNK_SRC_SEL			(1 << 24)
-
 /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
 #define INTRL2_CPU_STATUS		0x00
 #define INTRL2_CPU_SET			0x04
@@ -167,19 +167,20 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
 }

 static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
-				     struct net_device *bridge)
+				     struct dsa_bridge bridge,
+				     bool *tx_fwd_offload)
 {
 	dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
-		__func__, port, bridge->name);
+		__func__, port, bridge.dev->name);

 	return 0;
 }

 static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
-				       struct net_device *bridge)
+				       struct dsa_bridge bridge)
 {
 	dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
-		__func__, port, bridge->name);
+		__func__, port, bridge.dev->name);
 }

 static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -1110,12 +1110,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
 }

 static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
-				    struct net_device *br)
+				    struct dsa_bridge bridge,
+				    bool *tx_fwd_offload)
 {
 	struct lan9303 *chip = ds->priv;

 	dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
-	if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) {
+	if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
 		lan9303_bridge_ports(chip);
 		chip->is_bridged = true;  /* unleash stp_state_set() */
 	}
@@ -1124,7 +1125,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
 }

 static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
-				      struct net_device *br)
+				      struct dsa_bridge bridge)
 {
 	struct lan9303 *chip = ds->priv;

@@ -276,6 +276,7 @@ struct gswip_priv {
 	int num_gphy_fw;
 	struct gswip_gphy_fw *gphy_fw;
 	u32 port_vlan_filter;
+	struct mutex pce_table_lock;
 };

 struct gswip_pce_table_entry {
@@ -528,10 +529,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
 	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
 					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

+	mutex_lock(&priv->pce_table_lock);
+
 	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 				     GSWIP_PCE_TBL_CTRL_BAS);
-	if (err)
+	if (err) {
+		mutex_unlock(&priv->pce_table_lock);
 		return err;
+	}

 	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -541,8 +546,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,

 	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 				     GSWIP_PCE_TBL_CTRL_BAS);
-	if (err)
+	if (err) {
+		mutex_unlock(&priv->pce_table_lock);
 		return err;
+	}

 	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
 		tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
@@ -558,6 +565,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
 	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
 	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

+	mutex_unlock(&priv->pce_table_lock);
+
 	return 0;
 }

@@ -570,10 +579,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
 	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
 					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;

+	mutex_lock(&priv->pce_table_lock);
+
 	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
 				     GSWIP_PCE_TBL_CTRL_BAS);
-	if (err)
+	if (err) {
+		mutex_unlock(&priv->pce_table_lock);
 		return err;
+	}

 	gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
 	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -605,8 +618,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
 	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
 	gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);

-	return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
-				      GSWIP_PCE_TBL_CTRL_BAS);
+	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+				     GSWIP_PCE_TBL_CTRL_BAS);
+
+	mutex_unlock(&priv->pce_table_lock);
+
+	return err;
 }

 /* Add the LAN port into a bridge with the CPU port by
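The hunks above serialize the multi-step PCE table transaction behind pce_table_lock, and every early error return now drops the lock first. A compact userspace sketch of the same locking discipline, with pthreads standing in for the kernel mutex and printf for the register sequence:

/* Standalone sketch: one lock around a multi-step transaction,
 * released on every exit path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int wait_busy(void)
{
	return 0;	/* pretend the hardware busy bit cleared in time */
}

static int table_write(int index, int value)
{
	int err;

	pthread_mutex_lock(&table_lock);

	err = wait_busy();
	if (err) {
		pthread_mutex_unlock(&table_lock);	/* error path unlock */
		return err;
	}

	printf("write entry %d = %d\n", index, value);

	err = wait_busy();
	pthread_mutex_unlock(&table_lock);		/* normal path unlock */
	return err;
}

int main(void)
{
	return table_write(3, 42);
}
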
@@ -747,7 +764,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
 				     bool vlan_filtering,
 				     struct netlink_ext_ack *extack)
 {
-	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 	struct gswip_priv *priv = ds->priv;

 	/* Do not allow changing the VLAN filtering options while in bridge */
@@ -1134,16 +1151,18 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
 }

 static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
-				  struct net_device *bridge)
+				  struct dsa_bridge bridge,
+				  bool *tx_fwd_offload)
 {
+	struct net_device *br = bridge.dev;
 	struct gswip_priv *priv = ds->priv;
 	int err;

 	/* When the bridge uses VLAN filtering we have to configure VLAN
 	 * specific bridges. No bridge is configured here.
 	 */
-	if (!br_vlan_enabled(bridge)) {
-		err = gswip_vlan_add_unaware(priv, bridge, port);
+	if (!br_vlan_enabled(br)) {
+		err = gswip_vlan_add_unaware(priv, br, port);
 		if (err)
 			return err;
 		priv->port_vlan_filter &= ~BIT(port);
@@ -1154,8 +1173,9 @@ static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
 }

 static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
-				    struct net_device *bridge)
+				    struct dsa_bridge bridge)
 {
+	struct net_device *br = bridge.dev;
 	struct gswip_priv *priv = ds->priv;

 	gswip_add_single_port_br(priv, port, true);
@@ -1163,16 +1183,16 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
 	/* When the bridge uses VLAN filtering we have to configure VLAN
 	 * specific bridges. No bridge is configured here.
 	 */
-	if (!br_vlan_enabled(bridge))
-		gswip_vlan_remove(priv, bridge, port, 0, true, false);
+	if (!br_vlan_enabled(br))
+		gswip_vlan_remove(priv, br, port, 0, true, false);
 }

 static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
 				   const struct switchdev_obj_port_vlan *vlan,
 				   struct netlink_ext_ack *extack)
 {
+	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 	struct gswip_priv *priv = ds->priv;
-	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
 	unsigned int max_ports = priv->hw_info->max_ports;
 	int pos = max_ports;
 	int i, idx = -1;
@@ -1217,8 +1237,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
 			       const struct switchdev_obj_port_vlan *vlan,
 			       struct netlink_ext_ack *extack)
 {
+	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 	struct gswip_priv *priv = ds->priv;
-	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 	int err;
@@ -1242,8 +1262,8 @@
 static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
 			       const struct switchdev_obj_port_vlan *vlan)
 {
+	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 	struct gswip_priv *priv = ds->priv;
-	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

 	/* We have to receive all packets on the CPU port and should not
@@ -1328,8 +1348,8 @@ static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 static int gswip_port_fdb(struct dsa_switch *ds, int port,
 			  const unsigned char *addr, u16 vid, bool add)
 {
+	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
 	struct gswip_priv *priv = ds->priv;
-	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
 	struct gswip_pce_table_entry mac_bridge = {0,};
 	unsigned int cpu_port = priv->hw_info->cpu_port;
 	int fid = -1;
@@ -1426,116 +1446,70 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
 	return 0;
 }

-static void gswip_phylink_set_capab(unsigned long *supported,
-				    struct phylink_link_state *state)
-{
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
-	/* Allow all the expected bits */
-	phylink_set(mask, Autoneg);
-	phylink_set_port_modes(mask);
-	phylink_set(mask, Pause);
-	phylink_set(mask, Asym_Pause);
-
-	/* With the exclusion of MII, Reverse MII and Reduced MII, we
-	 * support Gigabit, including Half duplex
-	 */
-	if (state->interface != PHY_INTERFACE_MODE_MII &&
-	    state->interface != PHY_INTERFACE_MODE_REVMII &&
-	    state->interface != PHY_INTERFACE_MODE_RMII) {
-		phylink_set(mask, 1000baseT_Full);
-		phylink_set(mask, 1000baseT_Half);
-	}
-
-	phylink_set(mask, 10baseT_Half);
-	phylink_set(mask, 10baseT_Full);
-	phylink_set(mask, 100baseT_Half);
-	phylink_set(mask, 100baseT_Full);
-
-	bitmap_and(supported, supported, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
-	bitmap_and(state->advertising, state->advertising, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
-					  unsigned long *supported,
-					  struct phylink_link_state *state)
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+					  struct phylink_config *config)
 {
 	switch (port) {
 	case 0:
 	case 1:
-		if (!phy_interface_mode_is_rgmii(state->interface) &&
-		    state->interface != PHY_INTERFACE_MODE_MII &&
-		    state->interface != PHY_INTERFACE_MODE_REVMII &&
-		    state->interface != PHY_INTERFACE_MODE_RMII)
-			goto unsupported;
+		phy_interface_set_rgmii(config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_MII,
+			  config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_REVMII,
+			  config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_RMII,
+			  config->supported_interfaces);
 		break;

 	case 2:
 	case 3:
 	case 4:
-		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
-			goto unsupported;
+		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
+			  config->supported_interfaces);
 		break;

 	case 5:
-		if (!phy_interface_mode_is_rgmii(state->interface) &&
-		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
-			goto unsupported;
+		phy_interface_set_rgmii(config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
+			  config->supported_interfaces);
 		break;
-	default:
-		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-		dev_err(ds->dev, "Unsupported port: %i\n", port);
-		return;
 	}

-	gswip_phylink_set_capab(supported, state);
-	return;
-
-unsupported:
-	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
-		phy_modes(state->interface), port);
+	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+				   MAC_10 | MAC_100 | MAC_1000;
 }

-static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
-					  unsigned long *supported,
-					  struct phylink_link_state *state)
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+					  struct phylink_config *config)
 {
 	switch (port) {
 	case 0:
-		if (!phy_interface_mode_is_rgmii(state->interface) &&
-		    state->interface != PHY_INTERFACE_MODE_GMII &&
-		    state->interface != PHY_INTERFACE_MODE_RMII)
-			goto unsupported;
+		phy_interface_set_rgmii(config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_GMII,
+			  config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_RMII,
+			  config->supported_interfaces);
 		break;

 	case 1:
 	case 2:
 	case 3:
 	case 4:
-		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
-			goto unsupported;
+		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
+			  config->supported_interfaces);
 		break;

 	case 5:
-		if (!phy_interface_mode_is_rgmii(state->interface) &&
-		    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
-		    state->interface != PHY_INTERFACE_MODE_RMII)
-			goto unsupported;
+		phy_interface_set_rgmii(config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
+			  config->supported_interfaces);
+		__set_bit(PHY_INTERFACE_MODE_RMII,
+			  config->supported_interfaces);
 		break;
-	default:
-		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-		dev_err(ds->dev, "Unsupported port: %i\n", port);
-		return;
 	}

-	gswip_phylink_set_capab(supported, state);
-	return;
-
-unsupported:
-	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
-		phy_modes(state->interface), port);
+	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;
 }

 static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
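The conversion above replaces a reject-in-validate callback with phylink_get_caps: each port declares its supported interface modes as a bitmap up front, and the core does the filtering. A rough standalone sketch of that declare-a-bitmap approach; the enum values and port mapping here are illustrative, not the driver's.

/* Standalone sketch: per-port capability bitmap instead of per-call
 * validation with error paths. */
#include <stdio.h>

enum phy_mode { MODE_MII, MODE_RMII, MODE_RGMII, MODE_INTERNAL };

static unsigned long supported_for_port(int port)
{
	unsigned long caps = 0;

	switch (port) {
	case 0:
	case 1:
		/* external-facing ports: MAC-to-PHY interface modes */
		caps |= 1ul << MODE_RGMII | 1ul << MODE_MII | 1ul << MODE_RMII;
		break;
	default:
		/* everything else is wired internally */
		caps |= 1ul << MODE_INTERNAL;
		break;
	}
	return caps;
}

int main(void)
{
	printf("port 0 supports RGMII: %s\n",
	       supported_for_port(0) & (1ul << MODE_RGMII) ? "yes" : "no");
	printf("port 3 supports RGMII: %s\n",
	       supported_for_port(3) & (1ul << MODE_RGMII) ? "yes" : "no");
	return 0;
}
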
@@ -1817,7 +1791,7 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
 	.port_fdb_add		= gswip_port_fdb_add,
 	.port_fdb_del		= gswip_port_fdb_del,
 	.port_fdb_dump		= gswip_port_fdb_dump,
-	.phylink_validate	= gswip_xrx200_phylink_validate,
+	.phylink_get_caps	= gswip_xrx200_phylink_get_caps,
 	.phylink_mac_config	= gswip_phylink_mac_config,
 	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
 	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
@@ -1841,7 +1815,7 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
 	.port_fdb_add		= gswip_port_fdb_add,
 	.port_fdb_del		= gswip_port_fdb_del,
 	.port_fdb_dump		= gswip_port_fdb_dump,
-	.phylink_validate	= gswip_xrx300_phylink_validate,
+	.phylink_get_caps	= gswip_xrx300_phylink_get_caps,
 	.phylink_mac_config	= gswip_phylink_mac_config,
 	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
 	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
@@ -2111,6 +2085,7 @@ static int gswip_probe(struct platform_device *pdev)
 	priv->ds->priv = priv;
 	priv->ds->ops = priv->hw_info->ops;
 	priv->dev = dev;
+	mutex_init(&priv->pce_table_lock);
 	version = gswip_switch_r(priv, GSWIP_VERSION);

 	np = dev->of_node;
@ -10,6 +10,7 @@
|
|||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <linux/gpio.h>
|
#include <linux/gpio.h>
|
||||||
|
#include <linux/if_vlan.h>
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/platform_data/microchip-ksz.h>
|
#include <linux/platform_data/microchip-ksz.h>
|
||||||
@ -1002,57 +1003,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
|
|||||||
data &= ~PORT_VLAN_MEMBERSHIP;
|
data &= ~PORT_VLAN_MEMBERSHIP;
|
||||||
data |= (member & dev->port_mask);
|
data |= (member & dev->port_mask);
|
||||||
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
|
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
|
||||||
dev->ports[port].member = member;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
|
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
|
||||||
{
|
{
|
||||||
struct ksz_device *dev = ds->priv;
|
struct ksz_device *dev = ds->priv;
|
||||||
int forward = dev->member;
|
|
||||||
struct ksz_port *p;
|
struct ksz_port *p;
|
||||||
int member = -1;
|
|
||||||
u8 data;
|
u8 data;
|
||||||
|
|
||||||
p = &dev->ports[port];
|
|
||||||
|
|
||||||
ksz_pread8(dev, port, P_STP_CTRL, &data);
|
ksz_pread8(dev, port, P_STP_CTRL, &data);
|
||||||
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
|
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
|
||||||
|
|
||||||
switch (state) {
|
switch (state) {
|
||||||
case BR_STATE_DISABLED:
|
case BR_STATE_DISABLED:
|
||||||
data |= PORT_LEARN_DISABLE;
|
data |= PORT_LEARN_DISABLE;
|
||||||
if (port < dev->phy_port_cnt)
|
|
||||||
member = 0;
|
|
||||||
break;
|
break;
|
||||||
case BR_STATE_LISTENING:
|
case BR_STATE_LISTENING:
|
||||||
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
|
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
|
||||||
if (port < dev->phy_port_cnt &&
|
|
||||||
p->stp_state == BR_STATE_DISABLED)
|
|
||||||
member = dev->host_mask | p->vid_member;
|
|
||||||
break;
|
break;
|
||||||
case BR_STATE_LEARNING:
|
case BR_STATE_LEARNING:
|
||||||
data |= PORT_RX_ENABLE;
|
data |= PORT_RX_ENABLE;
|
||||||
break;
|
break;
|
||||||
case BR_STATE_FORWARDING:
|
case BR_STATE_FORWARDING:
|
||||||
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
|
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
|
||||||
|
|
||||||
/* This function is also used internally. */
|
|
||||||
if (port == dev->cpu_port)
|
|
||||||
break;
|
|
||||||
|
|
||||||
/* Port is a member of a bridge. */
|
|
||||||
if (dev->br_member & BIT(port)) {
|
|
||||||
dev->member |= BIT(port);
|
|
||||||
member = dev->member;
|
|
||||||
} else {
|
|
||||||
member = dev->host_mask | p->vid_member;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case BR_STATE_BLOCKING:
|
case BR_STATE_BLOCKING:
|
||||||
data |= PORT_LEARN_DISABLE;
|
data |= PORT_LEARN_DISABLE;
|
||||||
if (port < dev->phy_port_cnt &&
|
|
||||||
p->stp_state == BR_STATE_DISABLED)
|
|
||||||
member = dev->host_mask | p->vid_member;
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
dev_err(ds->dev, "invalid STP state: %d\n", state);
|
dev_err(ds->dev, "invalid STP state: %d\n", state);
|
||||||
@ -1060,22 +1036,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
|
|||||||
}
|
}
|
||||||
|
|
||||||
ksz_pwrite8(dev, port, P_STP_CTRL, data);
|
ksz_pwrite8(dev, port, P_STP_CTRL, data);
|
||||||
|
|
||||||
|
p = &dev->ports[port];
|
||||||
p->stp_state = state;
|
p->stp_state = state;
|
||||||
/* Port membership may share register with STP state. */
|
|
||||||
if (member >= 0 && member != p->member)
|
|
||||||
ksz8_cfg_port_member(dev, port, (u8)member);
|
|
||||||
|
|
||||||
/* Check if forwarding needs to be updated. */
|
ksz_update_port_member(dev, port);
|
||||||
if (state != BR_STATE_FORWARDING) {
|
|
||||||
if (dev->br_member & BIT(port))
|
|
||||||
dev->member &= ~BIT(port);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* When topology has changed the function ksz_update_port_member
|
|
||||||
* should be called to modify port forwarding behavior.
|
|
||||||
*/
|
|
||||||
if (forward != dev->member)
|
|
||||||
ksz_update_port_member(dev, port);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
|
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
|
||||||
@ -1341,7 +1306,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
|
|||||||
|
|
||||||
static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
|
static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
|
||||||
{
|
{
|
||||||
struct ksz_port *p = &dev->ports[port];
|
struct dsa_switch *ds = dev->ds;
|
||||||
struct ksz8 *ksz8 = dev->priv;
|
struct ksz8 *ksz8 = dev->priv;
|
||||||
const u32 *masks;
|
const u32 *masks;
|
||||||
u8 member;
|
u8 member;
|
||||||
@ -1368,10 +1333,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
|
|||||||
if (!ksz_is_ksz88x3(dev))
|
if (!ksz_is_ksz88x3(dev))
|
||||||
ksz8795_cpu_interface_select(dev, port);
|
ksz8795_cpu_interface_select(dev, port);
|
||||||
|
|
||||||
member = dev->port_mask;
|
member = dsa_user_ports(ds);
|
||||||
} else {
|
} else {
|
||||||
member = dev->host_mask | p->vid_member;
|
member = BIT(dsa_upstream_port(ds, port));
|
||||||
}
|
}
|
||||||
|
|
||||||
ksz8_cfg_port_member(dev, port, member);
|
ksz8_cfg_port_member(dev, port, member);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1392,20 +1358,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
|
|||||||
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
|
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
|
||||||
|
|
||||||
p = &dev->ports[dev->cpu_port];
|
p = &dev->ports[dev->cpu_port];
|
||||||
p->vid_member = dev->port_mask;
|
|
||||||
p->on = 1;
|
p->on = 1;
|
||||||
|
|
||||||
ksz8_port_setup(dev, dev->cpu_port, true);
|
ksz8_port_setup(dev, dev->cpu_port, true);
|
||||||
dev->member = dev->host_mask;
|
|
||||||
|
|
||||||
for (i = 0; i < dev->phy_port_cnt; i++) {
|
for (i = 0; i < dev->phy_port_cnt; i++) {
|
||||||
p = &dev->ports[i];
|
p = &dev->ports[i];
|
||||||
|
|
||||||
/* Initialize to non-zero so that ksz_cfg_port_member() will
|
|
||||||
* be called.
|
|
||||||
*/
|
|
||||||
p->vid_member = BIT(i);
|
|
||||||
p->member = dev->port_mask;
|
|
||||||
ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
|
ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
|
||||||
|
|
||||||
/* Last port may be disabled. */
|
/* Last port may be disabled. */
|
||||||
@@ -1542,15 +1501,13 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
 	phylink_set(mask, 100baseT_Half);
 	phylink_set(mask, 100baseT_Full);
 
-	bitmap_and(supported, supported, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
-	bitmap_and(state->advertising, state->advertising, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
 
 	return;
 
 unsupported:
-	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_zero(supported);
 	dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
 		phy_modes(state->interface), port);
 }
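The hunk above swaps open-coded bitmap_and()/bitmap_zero() calls for the linkmode helpers. A minimal sketch of what such a helper looks like, modeled on include/linux/linkmode.h (simplified, not part of this patch); hard-coding the ethtool bitmap length means callers can no longer pass a wrong NBITS value:

	#include <linux/bitmap.h>
	#include <linux/ethtool.h>

	/* Sketch only: equivalent in spirit to linkmode_and()/linkmode_zero(). */
	static inline void demo_linkmode_and(unsigned long *dst,
					     const unsigned long *a,
					     const unsigned long *b)
	{
		bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static inline void demo_linkmode_zero(unsigned long *dst)
	{
		bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
	}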
@@ -124,12 +124,23 @@ static const struct of_device_id ksz8795_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
 
+static const struct spi_device_id ksz8795_spi_ids[] = {
+	{ "ksz8765" },
+	{ "ksz8794" },
+	{ "ksz8795" },
+	{ "ksz8863" },
+	{ "ksz8873" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
 static struct spi_driver ksz8795_spi_driver = {
 	.driver = {
 		.name	= "ksz8795-switch",
 		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(ksz8795_dt_ids),
 	},
+	.id_table = ksz8795_spi_ids,
 	.probe	= ksz8795_spi_probe,
 	.remove	= ksz8795_spi_remove,
 	.shutdown = ksz8795_spi_shutdown,
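The new spi_device_id table gives the SPI core a plain-name match path alongside the OF table, and it is what produces the spi:<name> MODALIAS used for module autoloading. A minimal sketch of the same pattern with a hypothetical "foo" driver (all names illustrative, not from this patch):

	#include <linux/module.h>
	#include <linux/spi/spi.h>

	static int foo_probe(struct spi_device *spi)
	{
		return 0;
	}

	static const struct spi_device_id foo_spi_ids[] = {
		{ "foo" },	/* matches "spi:foo", enables autoload */
		{ },
	};
	MODULE_DEVICE_TABLE(spi, foo_spi_ids);

	static struct spi_driver foo_spi_driver = {
		.driver = {
			.name = "foo",
		},
		.id_table = foo_spi_ids,	/* name-based matching */
		.probe = foo_probe,
	};
	module_spi_driver(foo_spi_driver);

	MODULE_LICENSE("GPL");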
@@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
 				    u8 member)
 {
 	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
-	dev->ports[port].member = member;
 }
 
 static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
 	struct ksz_device *dev = ds->priv;
 	struct ksz_port *p = &dev->ports[port];
 	u8 data;
-	int member = -1;
-	int forward = dev->member;
 
 	ksz_pread8(dev, port, P_STP_CTRL, &data);
 	data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
@@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
 	switch (state) {
 	case BR_STATE_DISABLED:
 		data |= PORT_LEARN_DISABLE;
-		if (port != dev->cpu_port)
-			member = 0;
 		break;
 	case BR_STATE_LISTENING:
 		data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-		if (port != dev->cpu_port &&
-		    p->stp_state == BR_STATE_DISABLED)
-			member = dev->host_mask | p->vid_member;
 		break;
 	case BR_STATE_LEARNING:
 		data |= PORT_RX_ENABLE;
 		break;
 	case BR_STATE_FORWARDING:
 		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
-		/* This function is also used internally. */
-		if (port == dev->cpu_port)
-			break;
-
-		member = dev->host_mask | p->vid_member;
-		mutex_lock(&dev->dev_mutex);
-
-		/* Port is a member of a bridge. */
-		if (dev->br_member & (1 << port)) {
-			dev->member |= (1 << port);
-			member = dev->member;
-		}
-		mutex_unlock(&dev->dev_mutex);
 		break;
 	case BR_STATE_BLOCKING:
 		data |= PORT_LEARN_DISABLE;
-		if (port != dev->cpu_port &&
-		    p->stp_state == BR_STATE_DISABLED)
-			member = dev->host_mask | p->vid_member;
 		break;
 	default:
 		dev_err(ds->dev, "invalid STP state: %d\n", state);
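With the membership bookkeeping removed, the switch statement above is reduced to mapping each bridge STP state onto the port's RX/TX-enable and learn-disable bits. The same mapping, restated in one hypothetical helper for illustration (bit names are the driver's own; the function itself is not part of the patch):

	static u8 ksz_stp_state_to_ctrl_bits(u8 state)
	{
		switch (state) {
		case BR_STATE_DISABLED:
		case BR_STATE_BLOCKING:
			return PORT_LEARN_DISABLE;		/* no RX, no TX, no learning */
		case BR_STATE_LISTENING:
			return PORT_RX_ENABLE | PORT_LEARN_DISABLE;
		case BR_STATE_LEARNING:
			return PORT_RX_ENABLE;			/* learn, but don't forward */
		case BR_STATE_FORWARDING:
			return PORT_TX_ENABLE | PORT_RX_ENABLE;	/* fully operational */
		default:
			return 0;				/* invalid state */
		}
	}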
@@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
 
 	ksz_pwrite8(dev, port, P_STP_CTRL, data);
 	p->stp_state = state;
-	mutex_lock(&dev->dev_mutex);
-	/* Port membership may share register with STP state. */
-	if (member >= 0 && member != p->member)
-		ksz9477_cfg_port_member(dev, port, (u8)member);
 
-	/* Check if forwarding needs to be updated. */
-	if (state != BR_STATE_FORWARDING) {
-		if (dev->br_member & (1 << port))
-			dev->member &= ~(1 << port);
-	}
-
-	/* When topology has changed the function ksz_update_port_member
-	 * should be called to modify port forwarding behavior.
-	 */
-	if (forward != dev->member)
-		ksz_update_port_member(dev, port);
-	mutex_unlock(&dev->dev_mutex);
+	ksz_update_port_member(dev, port);
 }
 
 static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
 
 static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
 {
-	u8 data8;
-	u8 member;
-	u16 data16;
 	struct ksz_port *p = &dev->ports[port];
+	struct dsa_switch *ds = dev->ds;
+	u8 data8, member;
+	u16 data16;
 
 	/* enable tag tail for host port */
 	if (cpu_port)
@@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
 		ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
 		p->phydev.duplex = 1;
 	}
-	mutex_lock(&dev->dev_mutex);
+
 	if (cpu_port)
-		member = dev->port_mask;
+		member = dsa_user_ports(ds);
 	else
-		member = dev->host_mask | p->vid_member;
-	mutex_unlock(&dev->dev_mutex);
+		member = BIT(dsa_upstream_port(ds, port));
+
 	ksz9477_cfg_port_member(dev, port, member);
 
 	/* clear pending interrupts */
@@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
 			const char *prev_mode;
 
 			dev->cpu_port = i;
-			dev->host_mask = (1 << dev->cpu_port);
-			dev->port_mask |= dev->host_mask;
 			p = &dev->ports[i];
 
 			/* Read from XMII register to determine host port
@@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
 
 			/* enable cpu port */
 			ksz9477_port_setup(dev, i, true);
-			p->vid_member = dev->port_mask;
 			p->on = 1;
 		}
 	}
 
-	dev->member = dev->host_mask;
-
 	for (i = 0; i < dev->port_cnt; i++) {
 		if (i == dev->cpu_port)
 			continue;
 		p = &dev->ports[i];
 
-		/* Initialize to non-zero so that ksz_cfg_port_member() will
-		 * be called.
-		 */
-		p->vid_member = (1 << i);
-		p->member = dev->port_mask;
 		ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
 		p->on = 1;
 		if (i < dev->phy_port_cnt)
@@ -98,12 +98,24 @@ static const struct of_device_id ksz9477_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
 
+static const struct spi_device_id ksz9477_spi_ids[] = {
+	{ "ksz9477" },
+	{ "ksz9897" },
+	{ "ksz9893" },
+	{ "ksz9563" },
+	{ "ksz8563" },
+	{ "ksz9567" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
 static struct spi_driver ksz9477_spi_driver = {
 	.driver = {
 		.name	= "ksz9477-switch",
 		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(ksz9477_dt_ids),
 	},
+	.id_table = ksz9477_spi_ids,
 	.probe	= ksz9477_spi_probe,
 	.remove	= ksz9477_spi_remove,
 	.shutdown = ksz9477_spi_shutdown,
@@ -22,21 +22,60 @@
 
 void ksz_update_port_member(struct ksz_device *dev, int port)
 {
-	struct ksz_port *p;
-	int i;
-
-	for (i = 0; i < dev->port_cnt; i++) {
-		if (i == port || i == dev->cpu_port)
-			continue;
-		p = &dev->ports[i];
-		if (!(dev->member & (1 << i)))
-			continue;
-
-		/* Port is a member of the bridge and is forwarding. */
-		if (p->stp_state == BR_STATE_FORWARDING &&
-		    p->member != dev->member)
-			dev->dev_ops->cfg_port_member(dev, i, dev->member);
+	struct ksz_port *p = &dev->ports[port];
+	struct dsa_switch *ds = dev->ds;
+	u8 port_member = 0, cpu_port;
+	const struct dsa_port *dp;
+	int i, j;
+
+	if (!dsa_is_user_port(ds, port))
+		return;
+
+	dp = dsa_to_port(ds, port);
+	cpu_port = BIT(dsa_upstream_port(ds, port));
+
+	for (i = 0; i < ds->num_ports; i++) {
+		const struct dsa_port *other_dp = dsa_to_port(ds, i);
+		struct ksz_port *other_p = &dev->ports[i];
+		u8 val = 0;
+
+		if (!dsa_is_user_port(ds, i))
+			continue;
+		if (port == i)
+			continue;
+		if (!dsa_port_bridge_same(dp, other_dp))
+			continue;
+		if (other_p->stp_state != BR_STATE_FORWARDING)
+			continue;
+
+		if (p->stp_state == BR_STATE_FORWARDING) {
+			val |= BIT(port);
+			port_member |= BIT(i);
+		}
+
+		/* Retain port [i]'s relationship to other ports than [port] */
+		for (j = 0; j < ds->num_ports; j++) {
+			const struct dsa_port *third_dp;
+			struct ksz_port *third_p;
+
+			if (j == i)
+				continue;
+			if (j == port)
+				continue;
+			if (!dsa_is_user_port(ds, j))
+				continue;
+			third_p = &dev->ports[j];
+			if (third_p->stp_state != BR_STATE_FORWARDING)
+				continue;
+			third_dp = dsa_to_port(ds, j);
+			if (dsa_port_bridge_same(other_dp, third_dp))
+				val |= BIT(j);
+		}
+
+		dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
 	}
+
+	dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
 }
 EXPORT_SYMBOL_GPL(ksz_update_port_member);
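The rewritten ksz_update_port_member() derives every membership mask from DSA core state (bridge membership via dsa_port_bridge_same() plus per-port STP state) instead of the removed dev->member/dev->br_member caches. A toy standalone computation of the resulting masks, under hypothetical example values (CPU port at index 0, user ports 1 and 2 in the same bridge, both forwarding):

	#include <stdio.h>

	#define BIT(n)	(1u << (n))

	int main(void)
	{
		unsigned int cpu_port = BIT(0);

		/* Each user port may reach the other bridged port plus the
		 * upstream (CPU) port, but never carries its own bit.
		 */
		unsigned int member_port1 = BIT(2) | cpu_port;
		unsigned int member_port2 = BIT(1) | cpu_port;

		printf("port1 mask=0x%x port2 mask=0x%x\n",
		       member_port1, member_port2);
		return 0;
	}

Ports outside the bridge, or not in BR_STATE_FORWARDING, contribute nothing to either mask, which is exactly what the nested loops above enforce.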
@@ -173,14 +212,9 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
 EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
 
 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
-			 struct net_device *br)
+			 struct dsa_bridge bridge,
+			 bool *tx_fwd_offload)
 {
-	struct ksz_device *dev = ds->priv;
-
-	mutex_lock(&dev->dev_mutex);
-	dev->br_member |= (1 << port);
-	mutex_unlock(&dev->dev_mutex);
-
 	/* port_stp_state_set() will be called after to put the port in
 	 * appropriate state so there is no need to do anything.
 	 */
@@ -190,15 +224,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
 EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
 
 void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
-			   struct net_device *br)
+			   struct dsa_bridge bridge)
 {
-	struct ksz_device *dev = ds->priv;
-
-	mutex_lock(&dev->dev_mutex);
-	dev->br_member &= ~(1 << port);
-	dev->member &= ~(1 << port);
-	mutex_unlock(&dev->dev_mutex);
-
 	/* port_stp_state_set() will be called after to put the port in
 	 * forwarding state so there is no need to do anything.
 	 */
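Throughout this series the bridge is now passed by value as a struct dsa_bridge rather than as the bridge net_device. For reference, a simplified view of the fields these hunks rely on, paraphrased from the include/net/dsa.h of the same kernel generation (the struct is not defined by this patch, and the real one carries additional reference-counting state):

	/* Simplified, for orientation only. */
	struct dsa_bridge {
		struct net_device *dev;	/* the bridge netdevice */
		unsigned int num;	/* 1-based bridge number, 0 = none */
		bool tx_fwd_offload;	/* TX forwarding offload granted */
	};

Passing it by value lets drivers read bridge.num and bridge.tx_fwd_offload without dereferencing a possibly stale pointer.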
@@ -295,7 +322,6 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
 	struct ksz_device *dev = ds->priv;
 	struct alu_struct alu;
 	int index;
-	int ret = 0;
 
 	for (index = 0; index < dev->num_statics; index++) {
 		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
@@ -317,7 +343,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
 	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
 
 exit:
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
 
@@ -25,8 +25,6 @@ struct ksz_port_mib {
 };
 
 struct ksz_port {
-	u16 member;
-	u16 vid_member;
 	bool remove_tag;		/* Remove Tag flag set, for ksz8795 only */
 	int stp_state;
 	struct phy_device phydev;
@@ -83,8 +81,6 @@ struct ksz_device {
 	struct ksz_port *ports;
 	struct delayed_work mib_read;
 	unsigned long mib_read_interval;
-	u16 br_member;
-	u16 member;
 	u16 mirror_rx;
 	u16 mirror_tx;
 	u32 features;			/* chip specific features */
@@ -159,9 +155,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
 int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
 void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
-			 struct net_device *br);
+			 struct dsa_bridge bridge, bool *tx_fwd_offload);
 void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
-			   struct net_device *br);
+			   struct dsa_bridge bridge);
 void ksz_port_fast_age(struct dsa_switch *ds, int port);
 int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
 		      void *data);
@@ -1186,29 +1186,33 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
 
 static int
 mt7530_port_bridge_join(struct dsa_switch *ds, int port,
-			struct net_device *bridge)
+			struct dsa_bridge bridge, bool *tx_fwd_offload)
 {
-	struct mt7530_priv *priv = ds->priv;
+	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
 	u32 port_bitmap = BIT(MT7530_CPU_PORT);
-	int i;
+	struct mt7530_priv *priv = ds->priv;
 
 	mutex_lock(&priv->reg_mutex);
 
-	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+	dsa_switch_for_each_user_port(other_dp, ds) {
+		int other_port = other_dp->index;
+
+		if (dp == other_dp)
+			continue;
+
 		/* Add this port to the port matrix of the other ports in the
 		 * same bridge. If the port is disabled, port matrix is kept
 		 * and not being setup until the port becomes enabled.
 		 */
-		if (dsa_is_user_port(ds, i) && i != port) {
-			if (dsa_to_port(ds, i)->bridge_dev != bridge)
-				continue;
-			if (priv->ports[i].enable)
-				mt7530_set(priv, MT7530_PCR_P(i),
-					   PCR_MATRIX(BIT(port)));
-			priv->ports[i].pm |= PCR_MATRIX(BIT(port));
-
-			port_bitmap |= BIT(i);
-		}
+		if (!dsa_port_offloads_bridge(other_dp, &bridge))
+			continue;
+
+		if (priv->ports[other_port].enable)
+			mt7530_set(priv, MT7530_PCR_P(other_port),
+				   PCR_MATRIX(BIT(port)));
+		priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
+
+		port_bitmap |= BIT(other_port);
 	}
 
 	/* Add the all other ports to this port matrix. */
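dsa_switch_for_each_user_port() replaces the open-coded index loop plus the dsa_is_user_port() filter, iterating only over this switch's user ports and handing back the dsa_port directly. A sketch of the usage pattern (the loop body here is illustrative, not taken from the patch):

	struct dsa_port *other_dp;

	dsa_switch_for_each_user_port(other_dp, ds) {
		if (other_dp->index == port)	/* skip the port being changed */
			continue;
		/* ... inspect or program other_dp->index ... */
	}

Working with dsa_port pointers also makes helpers like dsa_port_offloads_bridge() and dsa_port_bridge_same() directly applicable, which is why the converted loops above read more declaratively.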
@@ -1236,7 +1240,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
 	/* This is called after .port_bridge_leave when leaving a VLAN-aware
 	 * bridge. Don't set standalone ports to fallback mode.
 	 */
-	if (dsa_to_port(ds, port)->bridge_dev)
+	if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
 		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
 			   MT7530_PORT_FALLBACK_MODE);
 
@@ -1299,26 +1303,30 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
 
 static void
 mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
-			 struct net_device *bridge)
+			 struct dsa_bridge bridge)
 {
+	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
 	struct mt7530_priv *priv = ds->priv;
-	int i;
 
 	mutex_lock(&priv->reg_mutex);
 
-	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+	dsa_switch_for_each_user_port(other_dp, ds) {
+		int other_port = other_dp->index;
+
+		if (dp == other_dp)
+			continue;
+
 		/* Remove this port from the port matrix of the other ports
 		 * in the same bridge. If the port is disabled, port matrix
 		 * is kept and not being setup until the port becomes enabled.
 		 */
-		if (dsa_is_user_port(ds, i) && i != port) {
-			if (dsa_to_port(ds, i)->bridge_dev != bridge)
-				continue;
-			if (priv->ports[i].enable)
-				mt7530_clear(priv, MT7530_PCR_P(i),
-					     PCR_MATRIX(BIT(port)));
-			priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
-		}
+		if (!dsa_port_offloads_bridge(other_dp, &bridge))
+			continue;
+
+		if (priv->ports[other_port].enable)
+			mt7530_clear(priv, MT7530_PCR_P(other_port),
+				     PCR_MATRIX(BIT(port)));
+		priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
 	}
 
 	/* Set the cpu port to be the only one in the port matrix of
@@ -2928,7 +2936,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
 
 	phylink_set_port_modes(mask);
 
-	if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+	if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
 	    !phy_interface_mode_is_8023z(state->interface)) {
 		phylink_set(mask, 10baseT_Half);
 		phylink_set(mask, 10baseT_Full);
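The operator change above is a correctness fix, not a cleanup: with ||, the condition was true for every interface mode, because no mode can simultaneously equal TRGMII and be an 802.3z mode, so the 10/100 link modes were advertised even on TRGMII and 802.3z links. Spelled out as a predicate (illustrative only):

	/* Advertise 10/100 modes only when the interface is neither
	 * TRGMII nor an 802.3z (1000/2500BASE-X) mode.
	 */
	bool can_do_10_100 =
		state->interface != PHY_INTERFACE_MODE_TRGMII &&
		!phy_interface_mode_is_8023z(state->interface);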
@@ -683,9 +683,8 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
 	if (chip->info->ops->phylink_validate)
 		chip->info->ops->phylink_validate(chip, port, mask, state);
 
-	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
-	bitmap_and(state->advertising, state->advertising, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
 
 	/* We can only operate at 2500BaseX or 1000BaseX. If requested
 	 * to advertise both, only report advertising at 2500BaseX.
@@ -1242,8 +1241,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
 {
 	struct dsa_switch *ds = chip->ds;
 	struct dsa_switch_tree *dst = ds->dst;
-	struct net_device *br;
-	struct dsa_port *dp;
+	struct dsa_port *dp, *other_dp;
 	bool found = false;
 	u16 pvlan;
 
@@ -1252,11 +1250,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
 		list_for_each_entry(dp, &dst->ports, list) {
 			if (dp->ds->index == dev && dp->index == port) {
 				/* dp might be a DSA link or a user port, so it
-				 * might or might not have a bridge_dev
-				 * pointer. Use the "found" variable for both
-				 * cases.
+				 * might or might not have a bridge.
+				 * Use the "found" variable for both cases.
 				 */
-				br = dp->bridge_dev;
 				found = true;
 				break;
 			}
@@ -1264,13 +1260,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
 	/* dev is a virtual bridge */
 	} else {
 		list_for_each_entry(dp, &dst->ports, list) {
-			if (dp->bridge_num < 0)
+			unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+
+			if (!bridge_num)
 				continue;
 
-			if (dp->bridge_num + 1 + dst->last_switch != dev)
+			if (bridge_num + dst->last_switch != dev)
 				continue;
 
-			br = dp->bridge_dev;
 			found = true;
 			break;
 		}
@@ -1289,12 +1286,11 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
 	/* Frames from user ports can egress any local DSA links and CPU ports,
 	 * as well as any local member of their bridge group.
 	 */
-	list_for_each_entry(dp, &dst->ports, list)
-		if (dp->ds == ds &&
-		    (dp->type == DSA_PORT_TYPE_CPU ||
-		     dp->type == DSA_PORT_TYPE_DSA ||
-		     (br && dp->bridge_dev == br)))
-			pvlan |= BIT(dp->index);
+	dsa_switch_for_each_port(other_dp, ds)
+		if (other_dp->type == DSA_PORT_TYPE_CPU ||
+		    other_dp->type == DSA_PORT_TYPE_DSA ||
+		    dsa_port_bridge_same(dp, other_dp))
+			pvlan |= BIT(other_dp->index);
 
 	return pvlan;
 }
@@ -1661,12 +1657,13 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
 static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
 					u16 vid)
 {
+	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
 	struct mv88e6xxx_chip *chip = ds->priv;
 	struct mv88e6xxx_vtu_entry vlan;
-	int i, err;
+	int err;
 
 	/* DSA and CPU ports have to be members of multiple vlans */
-	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
 		return 0;
 
 	err = mv88e6xxx_vtu_get(chip, vid, &vlan);
@@ -1676,27 +1673,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
 	if (!vlan.valid)
 		return 0;
 
-	for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-		if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
-			continue;
+	dsa_switch_for_each_user_port(other_dp, ds) {
+		struct net_device *other_br;
 
-		if (!dsa_to_port(ds, i)->slave)
-			continue;
-
-		if (vlan.member[i] ==
+		if (vlan.member[other_dp->index] ==
 		    MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
 			continue;
 
-		if (dsa_to_port(ds, i)->bridge_dev ==
-		    dsa_to_port(ds, port)->bridge_dev)
+		if (dsa_port_bridge_same(dp, other_dp))
 			break; /* same bridge, check next VLAN */
 
-		if (!dsa_to_port(ds, i)->bridge_dev)
+		other_br = dsa_port_bridge_dev_get(other_dp);
+		if (!other_br)
 			continue;
 
 		dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
-			port, vlan.vid, i,
-			netdev_name(dsa_to_port(ds, i)->bridge_dev));
+			port, vlan.vid, other_dp->index, netdev_name(other_br));
 		return -EOPNOTSUPP;
 	}
 
@@ -1706,13 +1698,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
 static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
 {
 	struct dsa_port *dp = dsa_to_port(chip->ds, port);
+	struct net_device *br = dsa_port_bridge_dev_get(dp);
 	struct mv88e6xxx_port *p = &chip->ports[port];
 	u16 pvid = MV88E6XXX_VID_STANDALONE;
 	bool drop_untagged = false;
 	int err;
 
-	if (dp->bridge_dev) {
-		if (br_vlan_enabled(dp->bridge_dev)) {
+	if (br) {
+		if (br_vlan_enabled(br)) {
 			pvid = p->bridge_pvid.vid;
 			drop_untagged = !p->bridge_pvid.valid;
 		} else {
@@ -2291,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
 	if (!mv88e6xxx_max_vid(chip))
 		return -EOPNOTSUPP;
 
+	/* The ATU removal procedure needs the FID to be mapped in the VTU,
+	 * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+	 * switchdev workqueue to ensure that all FDB entries are deleted
+	 * before we remove the VLAN.
+	 */
+	dsa_flush_workqueue();
+
 	mv88e6xxx_reg_lock(chip);
 
 	err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
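The new dsa_flush_workqueue() call orders two otherwise concurrent paths: FDB removal is deferred to the DSA switchdev workqueue, while VLAN deletion runs synchronously and would unmap the FID that those deferred ATU operations still need. The generic shape of this ordering pattern, with hypothetical work-item and queue names (not the actual DSA internals):

	/* FDB path: removal is queued, not executed inline. */
	queue_work(switchdev_wq, &fdb_del_work);

	/* VLAN-deletion path: wait until every queued removal has run... */
	flush_workqueue(switchdev_wq);
	/* ...and only then tear down the FID -> VTU mapping. */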
@@ -2430,7 +2430,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
 }
 
 static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
-				struct net_device *br)
+				struct dsa_bridge bridge)
 {
 	struct dsa_switch *ds = chip->ds;
 	struct dsa_switch_tree *dst = ds->dst;
@@ -2438,7 +2438,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
 	int err;
 
 	list_for_each_entry(dp, &dst->ports, list) {
-		if (dp->bridge_dev == br) {
+		if (dsa_port_offloads_bridge(dp, &bridge)) {
 			if (dp->ds == ds) {
 				/* This is a local bridge group member,
 				 * remap its Port VLAN Map.
@@ -2461,15 +2461,29 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
 	return 0;
 }
 
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map in the PVT. First dst->last_switch elements are taken by
+ * physical switches, so start from beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+					       unsigned int bridge_num)
+{
+	u8 dev = bridge_num + ds->dst->last_switch;
+	struct mv88e6xxx_chip *chip = ds->priv;
+
+	return mv88e6xxx_pvt_map(chip, dev, 0);
+}
+
 static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-				      struct net_device *br)
+				      struct dsa_bridge bridge,
+				      bool *tx_fwd_offload)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int err;
 
 	mv88e6xxx_reg_lock(chip);
 
-	err = mv88e6xxx_bridge_map(chip, br);
+	err = mv88e6xxx_bridge_map(chip, bridge);
 	if (err)
 		goto unlock;
 
@@ -2477,6 +2491,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 	if (err)
 		goto unlock;
 
+	if (mv88e6xxx_has_pvt(chip)) {
+		err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+		if (err)
+			goto unlock;
+
+		*tx_fwd_offload = true;
+	}
+
 unlock:
 	mv88e6xxx_reg_unlock(chip);
 
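mv88e6xxx_map_virtual_bridge_to_pvt() moves above its new callers, drops its own register locking (callers now hold the lock), and has its numbering adjusted: bridge numbers are now allocated starting at 1, with 0 meaning standalone, so the old 0-based bridge_num + last_switch + 1 becomes bridge_num + last_switch while addressing the same PVT slot. The arithmetic, isolated for illustration:

	/* With 1-based bridge numbers (0 = "not bridged"), the first
	 * bridge still lands in PVT slot last_switch + 1:
	 *
	 *   old: 0-based num -> dev = num + last_switch + 1
	 *   new: 1-based num -> dev = num + last_switch
	 */
	u8 virt_dev = bridge.num + ds->dst->last_switch;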
@@ -2484,14 +2506,18 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 }
 
 static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
-					struct net_device *br)
+					struct dsa_bridge bridge)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int err;
 
 	mv88e6xxx_reg_lock(chip);
 
-	if (mv88e6xxx_bridge_map(chip, br) ||
+	if (bridge.tx_fwd_offload &&
+	    mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+		dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+
+	if (mv88e6xxx_bridge_map(chip, bridge) ||
 	    mv88e6xxx_port_vlan_map(chip, port))
 		dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
 
@@ -2506,7 +2532,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
 
 static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
 					   int tree_index, int sw_index,
-					   int port, struct net_device *br)
+					   int port, struct dsa_bridge bridge)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int err;
@@ -2516,6 +2542,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
 
 	mv88e6xxx_reg_lock(chip);
 	err = mv88e6xxx_pvt_map(chip, sw_index, port);
+	err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
 	mv88e6xxx_reg_unlock(chip);
 
 	return err;
@@ -2523,7 +2550,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
 
 static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
 					     int tree_index, int sw_index,
-					     int port, struct net_device *br)
+					     int port, struct dsa_bridge bridge)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 
@@ -2531,49 +2558,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
 		return;
 
 	mv88e6xxx_reg_lock(chip);
-	if (mv88e6xxx_pvt_map(chip, sw_index, port))
+	if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
+	    mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
 		dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
 	mv88e6xxx_reg_unlock(chip);
 }
 
-/* Treat the software bridge as a virtual single-port switch behind the
- * CPU and map in the PVT. First dst->last_switch elements are taken by
- * physical switches, so start from beyond that range.
- */
-static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
-					       int bridge_num)
-{
-	u8 dev = bridge_num + ds->dst->last_switch + 1;
-	struct mv88e6xxx_chip *chip = ds->priv;
-	int err;
-
-	mv88e6xxx_reg_lock(chip);
-	err = mv88e6xxx_pvt_map(chip, dev, 0);
-	mv88e6xxx_reg_unlock(chip);
-
-	return err;
-}
-
-static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
-					   struct net_device *br,
-					   int bridge_num)
-{
-	return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
-}
-
-static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
-					      struct net_device *br,
-					      int bridge_num)
-{
-	int err;
-
-	err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
-	if (err) {
-		dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
-			ERR_PTR(err));
-	}
-}
-
 static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
 {
 	if (chip->info->ops->reset)
@@ -3200,8 +3190,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
 	 * time.
 	 */
 	if (mv88e6xxx_has_pvt(chip))
-		ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
-						 ds->dst->last_switch - 1;
+		ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+				      ds->dst->last_switch - 1;
 
 	mv88e6xxx_reg_lock(chip);
 
@@ -6298,8 +6288,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
 	.crosschip_lag_change	= mv88e6xxx_crosschip_lag_change,
 	.crosschip_lag_join	= mv88e6xxx_crosschip_lag_join,
 	.crosschip_lag_leave	= mv88e6xxx_crosschip_lag_leave,
-	.port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
-	.port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
 };
 
 static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
 	 */
 	clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
 
-	/* reserved for future extensions */
-	if (config->flags)
-		return -EINVAL;
-
 	switch (config->tx_type) {
 	case HWTSTAMP_TX_OFF:
 		tstamp_enable = false;
(File diff suppressed because it is too large)
@@ -13,38 +13,52 @@
 #include <linux/gpio.h>
 
 #define QCA8K_NUM_PORTS				7
+#define QCA8K_NUM_CPU_PORTS			2
 #define QCA8K_MAX_MTU				9000
+#define QCA8K_NUM_LAGS				4
+#define QCA8K_NUM_PORTS_FOR_LAG			4
 
 #define PHY_ID_QCA8327				0x004dd034
 #define QCA8K_ID_QCA8327			0x12
 #define PHY_ID_QCA8337				0x004dd036
 #define QCA8K_ID_QCA8337			0x13
 
+#define QCA8K_QCA832X_MIB_COUNT			39
+#define QCA8K_QCA833X_MIB_COUNT			41
+
 #define QCA8K_BUSY_WAIT_TIMEOUT			2000
 
 #define QCA8K_NUM_FDB_RECORDS			2048
 
-#define QCA8K_CPU_PORT				0
-
 #define QCA8K_PORT_VID_DEF			1
 
 /* Global control registers */
 #define QCA8K_REG_MASK_CTRL			0x000
 #define   QCA8K_MASK_CTRL_REV_ID_MASK		GENMASK(7, 0)
-#define   QCA8K_MASK_CTRL_REV_ID(x)		((x) >> 0)
+#define   QCA8K_MASK_CTRL_REV_ID(x)		FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
 #define   QCA8K_MASK_CTRL_DEVICE_ID_MASK	GENMASK(15, 8)
-#define   QCA8K_MASK_CTRL_DEVICE_ID(x)		((x) >> 8)
+#define   QCA8K_MASK_CTRL_DEVICE_ID(x)		FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
 #define QCA8K_REG_PORT0_PAD_CTRL		0x004
+#define   QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN	BIT(31)
+#define   QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE	BIT(19)
+#define   QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE	BIT(18)
 #define QCA8K_REG_PORT5_PAD_CTRL		0x008
 #define QCA8K_REG_PORT6_PAD_CTRL		0x00c
 #define   QCA8K_PORT_PAD_RGMII_EN		BIT(26)
-#define   QCA8K_PORT_PAD_RGMII_TX_DELAY(x)	((x) << 22)
-#define   QCA8K_PORT_PAD_RGMII_RX_DELAY(x)	((x) << 20)
+#define   QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK	GENMASK(23, 22)
+#define   QCA8K_PORT_PAD_RGMII_TX_DELAY(x)	FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
+#define   QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK	GENMASK(21, 20)
+#define   QCA8K_PORT_PAD_RGMII_RX_DELAY(x)	FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
 #define   QCA8K_PORT_PAD_RGMII_TX_DELAY_EN	BIT(25)
 #define   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN	BIT(24)
-#define   QCA8K_MAX_DELAY			3
 #define   QCA8K_PORT_PAD_SGMII_EN		BIT(7)
 #define QCA8K_REG_PWS				0x010
+#define   QCA8K_PWS_POWER_ON_SEL		BIT(31)
+/* This reg is only valid for QCA832x and toggle the package
+ * type from 176 pin (by default) to 148 pin used on QCA8327
+ */
+#define   QCA8327_PWS_PACKAGE148_EN		BIT(30)
+#define   QCA8K_PWS_LED_OPEN_EN_CSR		BIT(24)
 #define   QCA8K_PWS_SERDES_AEN_DIS		BIT(7)
 #define QCA8K_REG_MODULE_EN			0x030
 #define   QCA8K_MODULE_EN_MIB			BIT(0)
@@ -58,10 +72,12 @@
 #define   QCA8K_MDIO_MASTER_READ		BIT(27)
 #define   QCA8K_MDIO_MASTER_WRITE		0
 #define   QCA8K_MDIO_MASTER_SUP_PRE		BIT(26)
-#define   QCA8K_MDIO_MASTER_PHY_ADDR(x)		((x) << 21)
-#define   QCA8K_MDIO_MASTER_REG_ADDR(x)		((x) << 16)
-#define   QCA8K_MDIO_MASTER_DATA(x)		(x)
+#define   QCA8K_MDIO_MASTER_PHY_ADDR_MASK	GENMASK(25, 21)
+#define   QCA8K_MDIO_MASTER_PHY_ADDR(x)		FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define   QCA8K_MDIO_MASTER_REG_ADDR_MASK	GENMASK(20, 16)
+#define   QCA8K_MDIO_MASTER_REG_ADDR(x)		FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
 #define   QCA8K_MDIO_MASTER_DATA_MASK		GENMASK(15, 0)
+#define   QCA8K_MDIO_MASTER_DATA(x)		FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
 #define   QCA8K_MDIO_MASTER_MAX_PORTS		5
 #define   QCA8K_MDIO_MASTER_MAX_REG		32
 #define QCA8K_GOL_MAC_ADDR0			0x60
@@ -83,9 +99,7 @@
 #define   QCA8K_PORT_STATUS_FLOW_AUTO		BIT(12)
 #define QCA8K_REG_PORT_HDR_CTRL(_i)		(0x9c + (_i * 4))
 #define   QCA8K_PORT_HDR_CTRL_RX_MASK		GENMASK(3, 2)
-#define   QCA8K_PORT_HDR_CTRL_RX_S		2
 #define   QCA8K_PORT_HDR_CTRL_TX_MASK		GENMASK(1, 0)
-#define   QCA8K_PORT_HDR_CTRL_TX_S		0
 #define   QCA8K_PORT_HDR_CTRL_ALL		2
 #define   QCA8K_PORT_HDR_CTRL_MGMT		1
 #define   QCA8K_PORT_HDR_CTRL_NONE		0
@ -95,111 +109,167 @@
|
|||||||
#define QCA8K_SGMII_EN_TX BIT(3)
|
#define QCA8K_SGMII_EN_TX BIT(3)
|
||||||
#define QCA8K_SGMII_EN_SD BIT(4)
|
#define QCA8K_SGMII_EN_SD BIT(4)
|
||||||
#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
|
#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
|
||||||
#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
|
#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
|
||||||
#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
|
#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
|
||||||
#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
|
#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
|
||||||
#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
|
#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
|
||||||
|
#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
|
||||||
|
|
||||||
|
/* MAC_PWR_SEL registers */
|
||||||
|
#define QCA8K_REG_MAC_PWR_SEL 0x0e4
|
||||||
|
#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
|
||||||
|
#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
|
||||||
|
|
||||||
/* EEE control registers */
|
/* EEE control registers */
|
||||||
#define QCA8K_REG_EEE_CTRL 0x100
|
#define QCA8K_REG_EEE_CTRL 0x100
|
||||||
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
|
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
|
||||||
|
|
||||||
|
/* TRUNK_HASH_EN registers */
|
||||||
|
#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
|
||||||
|
#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
|
||||||
|
#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
|
||||||
|
#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
|
||||||
|
#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
|
||||||
|
#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
|
||||||
|
|
||||||
/* ACL registers */
|
/* ACL registers */
|
||||||
#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
|
#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
|
||||||
#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
|
#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
|
||||||
#define QCA8K_PORT_VLAN_SVID(x) x
|
#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
|
||||||
|
#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
|
||||||
|
#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
|
||||||
#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
|
#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
|
||||||
#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
|
#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
|
||||||
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
|
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
|
||||||
|
|
||||||
/* Lookup registers */
|
/* Lookup registers */
|
||||||
#define QCA8K_REG_ATU_DATA0 0x600
|
#define QCA8K_REG_ATU_DATA0 0x600
|
||||||
#define QCA8K_ATU_ADDR2_S 24
|
#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
|
||||||
#define QCA8K_ATU_ADDR3_S 16
|
#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
|
||||||
#define QCA8K_ATU_ADDR4_S 8
|
#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
|
||||||
|
#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
|
||||||
#define QCA8K_REG_ATU_DATA1 0x604
|
#define QCA8K_REG_ATU_DATA1 0x604
|
||||||
#define QCA8K_ATU_PORT_M 0x7f
|
#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
|
||||||
#define QCA8K_ATU_PORT_S 16
|
#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
|
||||||
#define QCA8K_ATU_ADDR0_S 8
|
#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
|
||||||
#define QCA8K_REG_ATU_DATA2 0x608
|
#define QCA8K_REG_ATU_DATA2 0x608
|
||||||
#define QCA8K_ATU_VID_M 0xfff
|
#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
|
||||||
#define QCA8K_ATU_VID_S 8
|
#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
|
||||||
#define QCA8K_ATU_STATUS_M 0xf
|
|
||||||
#define QCA8K_ATU_STATUS_STATIC 0xf
|
#define QCA8K_ATU_STATUS_STATIC 0xf
|
||||||
#define QCA8K_REG_ATU_FUNC 0x60c
|
#define QCA8K_REG_ATU_FUNC 0x60c
|
||||||
#define QCA8K_ATU_FUNC_BUSY BIT(31)
|
#define QCA8K_ATU_FUNC_BUSY BIT(31)
|
||||||
#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
|
#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
|
||||||
#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
|
#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
|
||||||
#define QCA8K_ATU_FUNC_FULL BIT(12)
|
#define QCA8K_ATU_FUNC_FULL BIT(12)
|
||||||
#define QCA8K_ATU_FUNC_PORT_M 0xf
|
#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
|
||||||
#define QCA8K_ATU_FUNC_PORT_S 8
|
|
||||||
#define QCA8K_REG_VTU_FUNC0 0x610
|
#define QCA8K_REG_VTU_FUNC0 0x610
|
||||||
#define QCA8K_VTU_FUNC0_VALID BIT(20)
|
#define QCA8K_VTU_FUNC0_VALID BIT(20)
|
||||||
#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
|
#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
|
/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
|
* It does contain VLAN_MODE for each port [5:4] for port0,
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
|
* [7:6] for port1 ... [17:16] for port6. Use virtual port
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
|
* define to handle this.
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
|
*/
|
||||||
#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
|
||||||
|
#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
|
||||||
#define QCA8K_REG_VTU_FUNC1 0x614
|
#define QCA8K_REG_VTU_FUNC1 0x614
|
||||||
#define QCA8K_VTU_FUNC1_BUSY BIT(31)
|
#define QCA8K_VTU_FUNC1_BUSY BIT(31)
|
||||||
#define QCA8K_VTU_FUNC1_VID_S 16
|
#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
|
||||||
#define QCA8K_VTU_FUNC1_FULL BIT(4)
|
#define QCA8K_VTU_FUNC1_FULL BIT(4)
|
||||||
|
#define QCA8K_REG_ATU_CTRL 0x618
|
||||||
|
#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
|
||||||
|
#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
|
||||||
#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
|
#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
|
||||||
#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
|
#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
|
||||||
|
#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
|
||||||
#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
|
#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
|
||||||
#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
|
#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
|
||||||
#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
|
#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
|
||||||
#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
|
#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
|
||||||
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S			0
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK		GENMASK(6, 0)
 #define QCA8K_PORT_LOOKUP_CTRL(_i)			(0x660 + (_i) * 0xc)
 #define QCA8K_PORT_LOOKUP_MEMBER			GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE			GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK		GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE		(0 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x)			FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK		(1 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE		QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK		(2 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK		QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE		(3 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK		QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE		QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
 #define QCA8K_PORT_LOOKUP_STATE_MASK			GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED		(0 << 16)
+#define QCA8K_PORT_LOOKUP_STATE(x)			FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING		(1 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED		QCA8K_PORT_LOOKUP_STATE(0x0)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING		(2 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING		QCA8K_PORT_LOOKUP_STATE(0x1)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING		(3 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING		QCA8K_PORT_LOOKUP_STATE(0x2)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD			(4 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING		QCA8K_PORT_LOOKUP_STATE(0x3)
-#define QCA8K_PORT_LOOKUP_STATE				GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD			QCA8K_PORT_LOOKUP_STATE(0x4)
 #define QCA8K_PORT_LOOKUP_LEARN				BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN			BIT(25)
 
+#define QCA8K_REG_GOL_TRUNK_CTRL0			0x700
+/* 4 max trunk first
+ * first 6 bit for member bitmap
+ * 7th bit is to enable trunk port
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i)			((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK			BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i)			(QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK			GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i)			(QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i)			(0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK		GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK		BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK		GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)		(((_i) / 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i)			((_i) * 4)
+/* Complex shift: FIRST shift for port THEN shift for trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)	(QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j)	(QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j)	(QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
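The two-level shift in the trunk member macros is easiest to see with concrete numbers. A minimal sketch, assuming the driver's regmap handle (qca8k itself goes through its own register accessors, so the regmap_update_bits() call here is illustrative only):

	/* For trunk 2, member slot 3: the slot sits at bit offset
	 * QCA8K_REG_GOL_MEM_ID_SHIFT(3) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(2)
	 * = 3 * 4 + (2 / 2) * 16 = 28, inside QCA8K_REG_GOL_TRUNK_CTRL(2),
	 * which is 0x704 + (2 / 2) * 4 = 0x708. The enable bit lands on
	 * bit 31 and the port number on bits 30..28.
	 */
	u32 mask = QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(2, 3) |
		   QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(2, 3);
	u32 val = QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(2, 3) |	/* slot in use */
		  (5 << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(2, 3)); /* port 5 */

	regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(2), mask, val);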
 #define QCA8K_REG_GLOBAL_FC_THRESH			0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x)		((x) << 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK		GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S			GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x)		FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x)		((x) << 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK		GENMASK(8, 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S		GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x)		FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
 
 #define QCA8K_REG_PORT_HOL_CTRL0(_i)			(0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF		GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK		GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x)			((x) << 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF		GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK		GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x)			((x) << 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF		GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK		GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x)			((x) << 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF		GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK		GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x)			((x) << 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF		GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK		GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x)			((x) << 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF		GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK		GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x)			((x) << 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF		GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK		GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x)			((x) << 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
 
 #define QCA8K_REG_PORT_HOL_CTRL1(_i)			(0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF			GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK		GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x)			((x) << 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x)			FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
 #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN		BIT(6)
 #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN		BIT(7)
 #define QCA8K_PORT_HOL_CTRL1_WRED_EN			BIT(8)
 #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN		BIT(16)
 
 /* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i)		(16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i)			(GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x)			((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
 #define QCA8K_EGRESS_VLAN(x)				(0x0c70 + (4 * (x / 2)))
 
 /* L3 registers */
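All of the conversions above follow one pattern: the hand-rolled shift constant is replaced by a GENMASK() plus a FIELD_PREP() wrapper from <linux/bitfield.h>. A minimal sketch (not part of the header) showing that the two spellings produce the same value:

	#include <linux/bitfield.h>

	/* Old style: value shifted by hand into bits 18:16 */
	u32 old_val = 3 << 16;				/* 0x00030000 */

	/* New style: FIELD_PREP() derives the shift from the mask and
	 * checks constant values against the field width at build time.
	 */
	u32 new_val = FIELD_PREP(GENMASK(18, 16), 3);	/* 0x00030000 */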
@ -229,6 +299,7 @@ enum qca8k_fdb_cmd {
 	QCA8K_FDB_FLUSH = 1,
 	QCA8K_FDB_LOAD = 2,
 	QCA8K_FDB_PURGE = 3,
+	QCA8K_FDB_FLUSH_PORT = 5,
 	QCA8K_FDB_NEXT = 6,
 	QCA8K_FDB_SEARCH = 7,
 };
@ -248,14 +319,31 @@ struct ar8xxx_port_status {
 
 struct qca8k_match_data {
 	u8 id;
+	bool reduced_package;
+	u8 mib_count;
+};
+
+enum {
+	QCA8K_CPU_PORT0,
+	QCA8K_CPU_PORT6,
+};
+
+struct qca8k_ports_config {
+	bool sgmii_rx_clk_falling_edge;
+	bool sgmii_tx_clk_falling_edge;
+	bool sgmii_enable_pll;
+	u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+	u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
 };
 
 struct qca8k_priv {
 	u8 switch_id;
 	u8 switch_revision;
-	u8 rgmii_tx_delay;
+	u8 mirror_rx;
-	u8 rgmii_rx_delay;
+	u8 mirror_tx;
+	u8 lag_hash_mode;
 	bool legacy_phy_port_mapping;
+	struct qca8k_ports_config ports_config;
 	struct regmap *regmap;
 	struct mii_bus *bus;
 	struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
@ -456,7 +456,7 @@ static int realtek_smi_probe(struct platform_device *pdev)
 	smi->ds->ops = var->ds_ops;
 	ret = dsa_register_switch(smi->ds);
 	if (ret) {
-		dev_err(dev, "unable to register switch ret = %d\n", ret);
+		dev_err_probe(dev, ret, "unable to register switch\n");
 		return ret;
 	}
 	return 0;
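dev_err_probe() is the usual replacement in probe paths: it appends the error code to the message, stays silent for -EPROBE_DEFER (recording the deferral reason instead), and returns the error it was given. A sketch of the fully collapsed idiom, which this hunk does not quite use since it keeps the explicit return:

	ret = dsa_register_switch(smi->ds);
	if (ret)
		return dev_err_probe(dev, ret, "unable to register switch\n");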
@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {
|
|||||||
.compatible = "realtek,rtl8366s",
|
.compatible = "realtek,rtl8366s",
|
||||||
.data = NULL,
|
.data = NULL,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
.compatible = "realtek,rtl8365mb",
|
||||||
|
.data = &rtl8365mb_variant,
|
||||||
|
},
|
||||||
{ /* sentinel */ },
|
{ /* sentinel */ },
|
||||||
};
|
};
|
||||||
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
|
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
|
||||||
|
@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
|
|||||||
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
|
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
|
||||||
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
|
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
|
||||||
int rtl8366_reset_vlan(struct realtek_smi *smi);
|
int rtl8366_reset_vlan(struct realtek_smi *smi);
|
||||||
int rtl8366_init_vlan(struct realtek_smi *smi);
|
|
||||||
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
|
|
||||||
struct netlink_ext_ack *extack);
|
|
||||||
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
|
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
|
||||||
const struct switchdev_obj_port_vlan *vlan,
|
const struct switchdev_obj_port_vlan *vlan,
|
||||||
struct netlink_ext_ack *extack);
|
struct netlink_ext_ack *extack);
|
||||||
@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
|
|||||||
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
|
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
|
||||||
|
|
||||||
extern const struct realtek_smi_variant rtl8366rb_variant;
|
extern const struct realtek_smi_variant rtl8366rb_variant;
|
||||||
|
extern const struct realtek_smi_variant rtl8365mb_variant;
|
||||||
|
|
||||||
#endif /* _REALTEK_SMI_H */
|
#endif /* _REALTEK_SMI_H */
|
||||||
@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
 }
 EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
 
-int rtl8366_init_vlan(struct realtek_smi *smi)
-{
-	int port;
-	int ret;
-
-	ret = rtl8366_reset_vlan(smi);
-	if (ret)
-		return ret;
-
-	/* Loop over the available ports, for each port, associate
-	 * it with the VLAN (port+1)
-	 */
-	for (port = 0; port < smi->num_ports; port++) {
-		u32 mask;
-
-		if (port == smi->cpu_port)
-			/* For the CPU port, make all ports members of this
-			 * VLAN.
-			 */
-			mask = GENMASK((int)smi->num_ports - 1, 0);
-		else
-			/* For all other ports, enable itself plus the
-			 * CPU port.
-			 */
-			mask = BIT(port) | BIT(smi->cpu_port);
-
-		/* For each port, set the port as member of VLAN (port+1)
-		 * and untagged, except for the CPU port: the CPU port (5) is
-		 * member of VLAN 6 and so are ALL the other ports as well.
-		 * Use filter 0 (no filter).
-		 */
-		dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
-			 (port + 1), port, mask);
-		ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
-		if (ret)
-			return ret;
-
-		dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
-			 (port + 1), port, (port + 1));
-		ret = rtl8366_set_pvid(smi, port, (port + 1));
-		if (ret)
-			return ret;
-	}
-
-	return rtl8366_enable_vlan(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
-			   struct netlink_ext_ack *extack)
-{
-	struct realtek_smi *smi = ds->priv;
-	struct rtl8366_vlan_4k vlan4k;
-	int ret;
-
-	/* Use VLAN nr port + 1 since VLAN0 is not valid */
-	if (!smi->ops->is_vlan_valid(smi, port + 1))
-		return -EINVAL;
-
-	dev_info(smi->dev, "%s filtering on port %d\n",
-		 vlan_filtering ? "enable" : "disable",
-		 port);
-
-	/* TODO:
-	 * The hardware support filter ID (FID) 0..7, I have no clue how to
-	 * support this in the driver when the callback only says on/off.
-	 */
-	ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
-	if (ret)
-		return ret;
-
-	/* Just set the filter to FID 1 for now then */
-	ret = rtl8366_set_vlan(smi, port + 1,
-			       vlan4k.member,
-			       vlan4k.untag,
-			       1);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-
 int rtl8366_vlan_add(struct dsa_switch *ds, int port,
 		     const struct switchdev_obj_port_vlan *vlan,
 		     struct netlink_ext_ack *extack)
@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
 		return ret;
 	}
 
-	dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
-		 vlan->vid, port, untagged ? "untagged" : "tagged",
-		 pvid ? " PVID" : "no PVID");
+	dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+		vlan->vid, port, untagged ? "untagged" : "tagged",
+		pvid ? "PVID" : "no PVID");
-	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
-		dev_err(smi->dev, "port is DSA or CPU port\n");
 
 	member |= BIT(port);
 
@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
 	struct realtek_smi *smi = ds->priv;
 	int ret, i;
 
-	dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
+	dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
 
 	for (i = 0; i < smi->num_vlan_mc; i++) {
 		struct rtl8366_vlan_mc vlanmc;
@ -14,6 +14,7 @@
 
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
@ -42,9 +43,12 @@
 /* Port Enable Control register */
 #define RTL8366RB_PECR				0x0001
 
-/* Switch Security Control registers */
-#define RTL8366RB_SSCR0				0x0002
-#define RTL8366RB_SSCR1				0x0003
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL		0x0002
+
+/* Security control, actually aging register */
+#define RTL8366RB_SECURITY_CTRL			0x0003
 
 #define RTL8366RB_SSCR2				0x0004
 #define RTL8366RB_SSCR2_DROP_UNKNOWN_DA		BIT(0)
 
@ -106,6 +110,18 @@
 
 #define RTL8366RB_POWER_SAVING_REG	0x0021
 
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE	0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED	0x0
+#define RTL8366RB_STP_STATE_BLOCKING	0x1
+#define RTL8366RB_STP_STATE_LEARNING	0x2
+#define RTL8366RB_STP_STATE_FORWARDING	0x3
+#define RTL8366RB_STP_MASK		GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+	((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+	RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
+
 /* CPU port control reg */
 #define RTL8368RB_CPU_CTRL_REG		0x0061
 #define RTL8368RB_CPU_PORTS_MSK		0x00FF
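Each STP register (one per FID, eight in total at 0x0050..0x0057) packs a 2-bit state per port, so a port's field is reached by shifting by port * 2. A minimal sketch, reusing smi->map from the surrounding driver code, that would put port 3 into forwarding state on FID 0 (bits 7:6 of register 0x0050):

	regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + 0,
			   RTL8366RB_STP_STATE_MASK(3),	/* GENMASK(1, 0) << 6 */
			   RTL8366RB_STP_STATE(3, RTL8366RB_STP_STATE_FORWARDING));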
@ -143,6 +159,21 @@
 #define RTL8366RB_PHY_NO_OFFSET			9
 #define RTL8366RB_PHY_NO_MASK			(0x1f << 9)
 
+/* VLAN Ingress Control Register 1, one bit per port.
+ * bit 0 .. 5 will make the switch drop ingress frames without
+ * VID such as untagged or priority-tagged frames for respective
+ * port.
+ * bit 6 .. 11 will make the switch drop ingress frames carrying
+ * a C-tag with VID != 0 for respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG	0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port)	(BIT((port)) | BIT((port) + 6))
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * bit0 .. bit5 will make the switch drop all ingress frames with
+ * a VLAN classification that does not include the port is in its
+ * member set.
+ */
 #define RTL8366RB_VLAN_INGRESS_CTRL2_REG	0x037f
 
 /* LED control registers */
@ -215,6 +246,7 @@
 #define RTL8366RB_NUM_LEDGROUPS		4
 #define RTL8366RB_NUM_VIDS		4096
 #define RTL8366RB_PRIORITYMAX		7
+#define RTL8366RB_NUM_FIDS		8
 #define RTL8366RB_FIDMAX		7
 
 #define RTL8366RB_PORT_1		BIT(0) /* In userspace port 0 */
@ -300,6 +332,13 @@
 #define RTL8366RB_INTERRUPT_STATUS_REG	0x0442
 #define RTL8366RB_NUM_INTERRUPT		14 /* 0..13 */
 
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE		0x0F08
+#define RTL8366RB_PORT_ISO(pnum)	(RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN		BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK	GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask)	((pmask) << 1)
+
 /* bits 0..5 enable force when cleared */
 #define RTL8366RB_MAC_FORCE_CTRL_REG	0x0F11
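In the isolation register, bit 0 is the enable flag and the peer-port bitmap sits one bit higher, which is exactly what RTL8366RB_PORT_ISO_PORTS() encodes. A sketch, assuming the CPU port number macro used later in this patch (RTL8366RB_PORT_NUM_CPU), that isolates user port 2 so it can only reach the CPU port:

	regmap_write(smi->map, RTL8366RB_PORT_ISO(2),
		     RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
		     RTL8366RB_PORT_ISO_EN);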
@ -314,9 +353,11 @@
 /**
  * struct rtl8366rb - RTL8366RB-specific data
  * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
  */
 struct rtl8366rb {
 	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+	bool pvid_enabled[RTL8366RB_NUM_PORTS];
 };
 
 static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
/* Isolate all user ports so they can only send packets to itself and the CPU port */
|
||||||
|
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
|
||||||
|
ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
|
||||||
|
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
|
||||||
|
RTL8366RB_PORT_ISO_EN);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
/* CPU port can send packets to all ports */
|
||||||
|
ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
|
||||||
|
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
|
||||||
|
RTL8366RB_PORT_ISO_EN);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
/* Set up the "green ethernet" feature */
|
/* Set up the "green ethernet" feature */
|
||||||
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
|
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
|
||||||
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
|
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
|
||||||
@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
|
|||||||
/* layer 2 size, see rtl8366rb_change_mtu() */
|
/* layer 2 size, see rtl8366rb_change_mtu() */
|
||||||
rb->max_mtu[i] = 1532;
|
rb->max_mtu[i] = 1532;
|
||||||
|
|
||||||
/* Enable learning for all ports */
|
/* Disable learning for all ports */
|
||||||
ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
|
ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
|
||||||
|
RTL8366RB_PORT_ALL);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Enable auto ageing for all ports */
|
/* Enable auto ageing for all ports */
|
||||||
ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
|
ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Discard VLAN tagged packets if the port is not a member of
|
/* Accept all packets by default, we enable filtering on-demand */
|
||||||
* the VLAN with which the packets is associated.
|
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
|
||||||
*/
|
0);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
|
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
|
||||||
RTL8366RB_PORT_ALL);
|
0);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = rtl8366_init_vlan(smi);
|
ret = rtl8366_reset_vlan(smi);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
ds->configure_vlan_while_not_filtering = false;
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1127,6 +1184,191 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
 	rb8366rb_set_port_led(smi, port, false);
 }
 
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+			   struct dsa_bridge bridge,
+			   bool *tx_fwd_offload)
+{
+	struct realtek_smi *smi = ds->priv;
+	unsigned int port_bitmap = 0;
+	int ret, i;
+
+	/* Loop over all other ports than the current one */
+	for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+		/* Current port handled last */
+		if (i == port)
+			continue;
+		/* Not on this bridge */
+		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+			continue;
+		/* Join this port to each other port on the bridge */
+		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+					 RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+					 RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+		if (ret)
+			dev_err(smi->dev, "failed to join port %d\n", port);
+
+		port_bitmap |= BIT(i);
+	}
+
+	/* Set the bits for the ports we can access */
+	return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+				  RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+				  RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+			    struct dsa_bridge bridge)
+{
+	struct realtek_smi *smi = ds->priv;
+	unsigned int port_bitmap = 0;
+	int ret, i;
+
+	/* Loop over all other ports than this one */
+	for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+		/* Current port handled last */
+		if (i == port)
+			continue;
+		/* Not on this bridge */
+		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+			continue;
+		/* Remove this port from any other port on the bridge */
+		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+					 RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+		if (ret)
+			dev_err(smi->dev, "failed to leave port %d\n", port);
+
+		port_bitmap |= BIT(i);
+	}
+
+	/* Clear the bits for the ports we can not access, leave ourselves */
+	regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+			   RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @smi: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ */
+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+{
+	return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+				  RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+				  drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+				    bool vlan_filtering,
+				    struct netlink_ext_ack *extack)
+{
+	struct realtek_smi *smi = ds->priv;
+	struct rtl8366rb *rb;
+	int ret;
+
+	rb = smi->chip_data;
+
+	dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+		vlan_filtering ? "enable" : "disable");
+
+	/* If the port is not in the member set, the frame will be dropped */
+	ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+				 BIT(port), vlan_filtering ? BIT(port) : 0);
+	if (ret)
+		return ret;
+
+	/* If VLAN filtering is enabled and PVID is also enabled, we must
+	 * not drop any untagged or C-tagged frames. If we turn off VLAN
+	 * filtering on a port, we need to accept any frames.
+	 */
+	if (vlan_filtering)
+		ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+	else
+		ret = rtl8366rb_drop_untagged(smi, port, false);
+
+	return ret;
+}
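The interaction of the two knobs is compact enough to tabulate; this is just a restatement of the branch above, not extra driver code:

	/*   vlan_filtering   pvid_enabled[port]   drop untagged/C-tagged?
	 *         0                 any                    no
	 *         1                  1                     no
	 *         1                  0                     yes
	 */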
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+				struct switchdev_brport_flags flags,
+				struct netlink_ext_ack *extack)
+{
+	/* We support enabling/disabling learning */
+	if (flags.mask & ~(BR_LEARNING))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+			    struct switchdev_brport_flags flags,
+			    struct netlink_ext_ack *extack)
+{
+	struct realtek_smi *smi = ds->priv;
+	int ret;
+
+	if (flags.mask & BR_LEARNING) {
+		ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+					 BIT(port),
+					 (flags.val & BR_LEARNING) ? 0 : BIT(port));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+	struct realtek_smi *smi = ds->priv;
+	u32 val;
+	int i;
+
+	switch (state) {
+	case BR_STATE_DISABLED:
+		val = RTL8366RB_STP_STATE_DISABLED;
+		break;
+	case BR_STATE_BLOCKING:
+	case BR_STATE_LISTENING:
+		val = RTL8366RB_STP_STATE_BLOCKING;
+		break;
+	case BR_STATE_LEARNING:
+		val = RTL8366RB_STP_STATE_LEARNING;
+		break;
+	case BR_STATE_FORWARDING:
+		val = RTL8366RB_STP_STATE_FORWARDING;
+		break;
+	default:
+		dev_err(smi->dev, "unknown bridge state requested\n");
+		return;
+	}
+
+	/* Set the same status for the port on all the FIDs */
+	for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+		regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+				   RTL8366RB_STP_STATE_MASK(port),
+				   RTL8366RB_STP_STATE(port, val));
+	}
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+	struct realtek_smi *smi = ds->priv;
+
+	/* This will age out any learned L2 entries */
+	regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+			   BIT(port), BIT(port));
+	/* Restore the normal state of things */
+	regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+			   BIT(port), 0);
+}
+
 static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 {
 	struct realtek_smi *smi = ds->priv;
@ -1338,14 +1580,34 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
 
 static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
 {
+	struct rtl8366rb *rb;
+	bool pvid_enabled;
+	int ret;
+
+	rb = smi->chip_data;
+	pvid_enabled = !!index;
+
 	if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
 		return -EINVAL;
 
-	return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+	ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
 				 RTL8366RB_PORT_VLAN_CTRL_MASK <<
					RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
 				 (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
					RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+	if (ret)
+		return ret;
+
+	rb->pvid_enabled[port] = pvid_enabled;
+
+	/* If VLAN filtering is enabled and PVID is also enabled, we must
+	 * not drop any untagged or C-tagged frames. Make sure to update the
+	 * filtering setting.
+	 */
+	if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
+		ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+
+	return ret;
 }
 static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
@ -1355,7 +1617,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
 	if (smi->vlan4k_enabled)
 		max = RTL8366RB_NUM_VIDS - 1;
 
-	if (vlan == 0 || vlan > max)
+	if (vlan > max)
 		return false;
 
 	return true;
@ -1510,11 +1772,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
 	.get_strings = rtl8366_get_strings,
 	.get_ethtool_stats = rtl8366_get_ethtool_stats,
 	.get_sset_count = rtl8366_get_sset_count,
-	.port_vlan_filtering = rtl8366_vlan_filtering,
+	.port_bridge_join = rtl8366rb_port_bridge_join,
+	.port_bridge_leave = rtl8366rb_port_bridge_leave,
+	.port_vlan_filtering = rtl8366rb_vlan_filtering,
 	.port_vlan_add = rtl8366_vlan_add,
 	.port_vlan_del = rtl8366_vlan_del,
 	.port_enable = rtl8366rb_port_enable,
 	.port_disable = rtl8366rb_port_disable,
+	.port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+	.port_bridge_flags = rtl8366rb_port_bridge_flags,
+	.port_stp_state_set = rtl8366rb_port_stp_state_set,
+	.port_fast_age = rtl8366rb_port_fast_age,
 	.port_change_mtu = rtl8366rb_change_mtu,
 	.port_max_mtu = rtl8366rb_max_mtu,
 };
@ -20,6 +27,27 @@
 #define SJA1105_AGEING_TIME_MS(ms)	((ms) / 10)
 #define SJA1105_NUM_L2_POLICERS		SJA1110_MAX_L2_POLICING_COUNT
 
+/* Calculated assuming 1Gbps, where the clock has 125 MHz (8 ns period)
+ * To avoid floating point operations, we'll multiply the degrees by 10
+ * to get a "phase" and get 1 decimal point precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+	(((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+	((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+	(((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+	SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* Valid range in degrees is a value between 73.8 and 101.7
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+	SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+	SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
+
 typedef enum {
 	SPI_READ = 0,
 	SPI_WRITE = 1,
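A worked example of the new fixed-point helpers: at 1 Gbps the clock period is 8 ns (8000 ps), and the "phase" unit is tenths of a degree, so a 2000 ps delay becomes 2000 * 360 / 800 = 900 (90.0 degrees) and maps to hardware code (900 - 738) / 9 = 18. The legal window works out to 1640..2260 ps:

	int phase = SJA1105_RGMII_DELAY_PS_TO_PHASE(2000);	/* 900, i.e. 90.0 deg */
	int hw_code = SJA1105_RGMII_DELAY_PHASE_TO_HW(phase);	/* (900 - 738) / 9 = 18 */
	int min_ps = SJA1105_RGMII_DELAY_MIN_PS;		/* 800 * 738 / 360 = 1640 */
	int max_ps = SJA1105_RGMII_DELAY_MAX_PS;		/* 800 * 1017 / 360 = 2260 */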
@ -222,33 +243,35 @@ struct sja1105_flow_block {
 
 struct sja1105_private {
 	struct sja1105_static_config static_config;
-	bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
+	int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
-	bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+	int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
 	phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
 	bool fixed_link[SJA1105_MAX_NUM_PORTS];
-	bool vlan_aware;
 	unsigned long ucast_egress_floods;
 	unsigned long bcast_egress_floods;
+	unsigned long hwts_tx_en;
 	const struct sja1105_info *info;
 	size_t max_xfer_len;
-	struct gpio_desc *reset_gpio;
 	struct spi_device *spidev;
 	struct dsa_switch *ds;
 	u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
 	u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
 	struct sja1105_flow_block flow_block;
-	struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
 	/* Serializes transmission of management frames so that
 	 * the switch doesn't confuse them with one another.
 	 */
 	struct mutex mgmt_lock;
+	/* PTP two-step TX timestamp ID, and its serialization lock */
+	spinlock_t ts_id_lock;
+	u8 ts_id;
+	/* Serializes access to the dynamic config interface */
+	struct mutex dynamic_config_lock;
 	struct devlink_region **regions;
 	struct sja1105_cbs_entry *cbs;
 	struct mii_bus *mdio_base_t1;
 	struct mii_bus *mdio_base_tx;
 	struct mii_bus *mdio_pcs;
 	struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
-	struct sja1105_tagger_data tagger_data;
 	struct sja1105_ptp_data ptp_data;
 	struct sja1105_tas_data tas_data;
 };
@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
 	sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
 }
 
-/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static u64 sja1105_rgmii_delay(u64 phase)
-{
-	/* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
-	 * To avoid floating point operations we'll multiply by 10
-	 * and get 1 decimal point precision.
-	 */
-	phase *= 10;
-	return (phase - 738) / 9;
-}
-
 /* The RGMII delay setup procedure is 2-step and gets called upon each
  * .phylink_mac_config. Both are strategic.
  * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
 	const struct sja1105_private *priv = ctx;
 	const struct sja1105_regs *regs = priv->info->regs;
 	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+	int rx_delay = priv->rgmii_rx_delay_ps[port];
+	int tx_delay = priv->rgmii_tx_delay_ps[port];
 	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
 	int rc;
 
-	if (priv->rgmii_rx_delay[port])
-		pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+	if (rx_delay)
+		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
-	if (priv->rgmii_tx_delay[port])
-		pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+	if (tx_delay)
+		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
 
 	/* Stage 1: Turn the RGMII delay lines off. */
 	pad_mii_id.rxc_bypass = 1;
@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
 		return rc;
 
 	/* Stage 2: Turn the RGMII delay lines on. */
-	if (priv->rgmii_rx_delay[port]) {
+	if (rx_delay) {
 		pad_mii_id.rxc_bypass = 0;
 		pad_mii_id.rxc_pd = 0;
 	}
-	if (priv->rgmii_tx_delay[port]) {
+	if (tx_delay) {
 		pad_mii_id.txc_bypass = 0;
 		pad_mii_id.txc_pd = 0;
 	}
@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)
 	const struct sja1105_private *priv = ctx;
 	const struct sja1105_regs *regs = priv->info->regs;
 	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+	int rx_delay = priv->rgmii_rx_delay_ps[port];
+	int tx_delay = priv->rgmii_tx_delay_ps[port];
 	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
 
 	pad_mii_id.rxc_pd = 1;
 	pad_mii_id.txc_pd = 1;
 
-	if (priv->rgmii_rx_delay[port]) {
+	if (rx_delay) {
-		pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
 		/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
 		pad_mii_id.rxc_bypass = 1;
 		pad_mii_id.rxc_pd = 0;
 	}
 
-	if (priv->rgmii_tx_delay[port]) {
+	if (tx_delay) {
-		pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
 		pad_mii_id.txc_bypass = 1;
 		pad_mii_id.txc_pd = 0;
 	}
@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
 	},
 };
 
+#define SJA1105_DYNAMIC_CONFIG_SLEEP_US		10
+#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US	100000
+
+static int
+sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+				  struct sja1105_dyn_cmd *cmd,
+				  const struct sja1105_dynamic_table_ops *ops)
+{
+	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+	int rc;
+
+	/* We don't _need_ to read the full entry, just the command area which
+	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
+	 * buffer that contains the full entry too. Additionally, our API
+	 * doesn't really know how many bytes into the buffer does the command
+	 * area really begin. So just read back the whole entry.
+	 */
+	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+			      ops->packed_size);
+	if (rc)
+		return rc;
+
+	/* Unpack the command structure, and return it to the caller in case it
+	 * needs to perform further checks on it (VALIDENT).
+	 */
+	memset(cmd, 0, sizeof(*cmd));
+	ops->cmd_packing(packed_buf, cmd, UNPACK);
+
+	/* Hardware hasn't cleared VALID => still working on it */
+	return cmd->valid ? -EAGAIN : 0;
+}
+
+/* Poll the dynamic config entry's control area until the hardware has
+ * cleared the VALID bit, which means we have confirmation that it has
+ * finished processing the command.
+ */
+static int
+sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+				     struct sja1105_dyn_cmd *cmd,
+				     const struct sja1105_dynamic_table_ops *ops)
+{
+	int rc;
+
+	return read_poll_timeout(sja1105_dynamic_config_poll_valid,
+				 rc, rc != -EAGAIN,
+				 SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+				 false, priv, cmd, ops);
+}
+
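read_poll_timeout() comes from <linux/iopoll.h>: it repeatedly calls the given function into rc until the condition holds or the timeout expires, returning -ETIMEDOUT otherwise. Roughly what the call above expands to (a sketch; the real macro tracks the deadline with ktime and returns 0 as soon as "rc != -EAGAIN" holds):

	unsigned int elapsed_us = 0;
	int rc;

	for (;;) {
		rc = sja1105_dynamic_config_poll_valid(priv, cmd, ops);
		if (rc != -EAGAIN)
			return 0;	/* condition satisfied */
		if (elapsed_us >= SJA1105_DYNAMIC_CONFIG_TIMEOUT_US)
			return -ETIMEDOUT;	/* VALID never cleared */
		usleep_range(SJA1105_DYNAMIC_CONFIG_SLEEP_US,
			     SJA1105_DYNAMIC_CONFIG_SLEEP_US * 2);
		elapsed_us += SJA1105_DYNAMIC_CONFIG_SLEEP_US;
	}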
 /* Provides read access to the settings through the dynamic interface
  * of the switch.
  * @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
 	struct sja1105_dyn_cmd cmd = {0};
 	/* SPI payload buffer */
 	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
-	int retries = 3;
 	int rc;
 
 	if (blk_idx >= BLK_IDX_MAX_DYN)
@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
 	ops->entry_packing(packed_buf, entry, PACK);
 
 	/* Send SPI write operation: read config table entry */
+	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
+	if (rc < 0) {
+		mutex_unlock(&priv->dynamic_config_lock);
+		return rc;
+	}
+
+	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
 		return rc;
 
-	/* Loop until we have confirmation that hardware has finished
-	 * processing the command and has cleared the VALID field
-	 */
-	do {
-		memset(packed_buf, 0, ops->packed_size);
-
-		/* Retrieve the read operation's result */
-		rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
-				      ops->packed_size);
-		if (rc < 0)
-			return rc;
-
-		cmd = (struct sja1105_dyn_cmd) {0};
-		ops->cmd_packing(packed_buf, &cmd, UNPACK);
-
-		if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
-			return -ENOENT;
-		cpu_relax();
-	} while (cmd.valid && --retries);
-
-	if (cmd.valid)
-		return -ETIMEDOUT;
+	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+		return -ENOENT;
 
 	/* Don't dereference possibly NULL pointer - maybe caller
 	 * only wanted to see whether the entry existed or not.
@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
 	ops->entry_packing(packed_buf, entry, PACK);
 
 	/* Send SPI write operation: read config table entry */
+	mutex_lock(&priv->dynamic_config_lock);
 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
 			      ops->packed_size);
+	if (rc < 0) {
+		mutex_unlock(&priv->dynamic_config_lock);
+		return rc;
+	}
+
+	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+	mutex_unlock(&priv->dynamic_config_lock);
 	if (rc < 0)
 		return rc;
 
@ -379,7 +379,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
 		vl_rule = true;
 
 		rc = sja1105_vl_gate(priv, port, extack, cookie,
-				     &key, act->gate.index,
+				     &key, act->hw_index,
 				     act->gate.prio,
 				     act->gate.basetime,
 				     act->gate.cycletime,
@ -27,15 +29,29 @@
 #define SJA1105_UNKNOWN_MULTICAST	0x010000000000ull
 
-static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
-			     unsigned int startup_delay)
+/* Configure the optional reset pin and bring up switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+			    unsigned int startup_delay)
 {
+	struct gpio_desc *gpio;
+
+	gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	if (!gpio)
+		return 0;
+
 	gpiod_set_value_cansleep(gpio, 1);
 	/* Wait for minimum reset pulse length */
 	msleep(pulse_len);
 	gpiod_set_value_cansleep(gpio, 0);
 	/* Wait until chip is ready after reset */
 	msleep(startup_delay);
+
+	gpiod_put(gpio);
+
+	return 0;
 }
 
 static void
|
|||||||
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
|
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
|
||||||
{
|
{
|
||||||
struct dsa_port *dp = dsa_to_port(ds, port);
|
struct dsa_port *dp = dsa_to_port(ds, port);
|
||||||
|
struct net_device *br = dsa_port_bridge_dev_get(dp);
|
||||||
struct sja1105_private *priv = ds->priv;
|
struct sja1105_private *priv = ds->priv;
|
||||||
struct sja1105_vlan_lookup_entry *vlan;
|
struct sja1105_vlan_lookup_entry *vlan;
|
||||||
bool drop_untagged = false;
|
bool drop_untagged = false;
|
||||||
int match, rc;
|
int match, rc;
|
||||||
u16 pvid;
|
u16 pvid;
|
||||||
|
|
||||||
if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
|
if (br && br_vlan_enabled(br))
|
||||||
pvid = priv->bridge_pvid[port];
|
pvid = priv->bridge_pvid[port];
|
||||||
else
|
else
|
||||||
pvid = priv->tag_8021q_pvid[port];
|
pvid = priv->tag_8021q_pvid[port];
|
||||||
@ -1095,27 +1110,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)
|
|||||||
return sja1105_static_config_upload(priv);
|
return sja1105_static_config_upload(priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
|
/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
|
||||||
|
* based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
|
||||||
|
* properties. It has the advantage of working with fixed links and with PHYs
|
||||||
|
* that apply RGMII delays too, and the MAC driver needs not perform any
|
||||||
|
* special checks.
|
||||||
|
*
|
||||||
|
* Previously we were acting upon the "phy-mode" property when we were
|
||||||
|
* operating in fixed-link, basically acting as a PHY, but with a reversed
|
||||||
|
* interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
|
||||||
|
* behave as if it is connected to a PHY which has applied RGMII delays in the
|
||||||
|
* TX direction. So if anything, RX delays should have been added by the MAC,
|
||||||
|
* but we were adding TX delays.
|
||||||
|
*
|
||||||
|
* If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
|
||||||
|
* back to the legacy behavior and apply delays on fixed-link ports based on
|
||||||
|
* the reverse interpretation of the phy-mode. This is a deviation from the
|
||||||
|
* expected default behavior which is to simply apply no delays. To achieve
|
||||||
|
* that behavior with the new bindings, it is mandatory to specify
|
||||||
|
* "{rx,tx}-internal-delay-ps" with a value of 0.
|
||||||
|
*/
|
||||||
|
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
|
||||||
|
struct device_node *port_dn)
|
||||||
{
|
{
|
||||||
struct dsa_switch *ds = priv->ds;
|
phy_interface_t phy_mode = priv->phy_mode[port];
|
||||||
int port;
|
struct device *dev = &priv->spidev->dev;
|
||||||
|
int rx_delay = -1, tx_delay = -1;
|
||||||
|
|
||||||
for (port = 0; port < ds->num_ports; port++) {
|
if (!phy_interface_mode_is_rgmii(phy_mode))
|
||||||
if (!priv->fixed_link[port])
|
return 0;
|
||||||
continue;
|
|
||||||
|
|
||||||
if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
|
of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
|
||||||
priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
|
of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
|
||||||
priv->rgmii_rx_delay[port] = true;
|
|
||||||
|
|
||||||
if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
|
if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
|
||||||
priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
|
dev_warn(dev,
|
||||||
priv->rgmii_tx_delay[port] = true;
|
"Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
|
||||||
|
"please update device tree to specify \"rx-internal-delay-ps\" and "
|
||||||
|
"\"tx-internal-delay-ps\"",
|
||||||
|
port);
|
||||||
|
|
||||||
if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
|
if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
|
||||||
!priv->info->setup_rgmii_delay)
|
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
|
||||||
return -EINVAL;
|
rx_delay = 2000;
|
||||||
|
|
||||||
|
if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
|
||||||
|
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
|
||||||
|
tx_delay = 2000;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (rx_delay < 0)
|
||||||
|
rx_delay = 0;
|
||||||
|
if (tx_delay < 0)
|
||||||
|
tx_delay = 0;
|
||||||
|
|
||||||
|
if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
|
||||||
|
dev_err(dev, "Chip cannot apply RGMII delays\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
|
||||||
|
(tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
|
||||||
|
(rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
|
||||||
|
(tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
|
||||||
|
dev_err(dev,
|
||||||
|
"port %d RGMII delay values out of range, must be between %d and %d ps\n",
|
||||||
|
port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
|
||||||
|
return -ERANGE;
|
||||||
|
}
|
||||||
|
|
||||||
|
priv->rgmii_rx_delay_ps[port] = rx_delay;
|
||||||
|
priv->rgmii_tx_delay_ps[port] = tx_delay;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
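The -1 sentinels work because of_property_read_u32() only writes to its output argument on success; when the property is absent it returns an error and leaves the variable untouched, so "still -1" reliably means "not specified in the device tree". A condensed sketch of that pattern, mirroring the function above:

	int rx_delay = -1;	/* sentinel: property absent */

	of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
	if (rx_delay == -1) {
		/* no binding given: fall back to the legacy phy-mode logic */
	}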
@ -1166,6 +1232,12 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
 		}
 
 		priv->phy_mode[index] = phy_mode;
+
+		err = sja1105_parse_rgmii_delays(priv, index, child);
+		if (err) {
+			of_node_put(child);
+			return err;
+		}
 	}
 
 	return 0;
@ -1360,7 +1432,7 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
 	 */
 	if (state->interface != PHY_INTERFACE_MODE_NA &&
 	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
-		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		linkmode_zero(supported);
 		return;
 	}
 
@ -1380,9 +1452,8 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
 		phylink_set(mask, 2500baseX_Full);
 	}
 
-	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
-	bitmap_and(state->advertising, state->advertising, mask,
-		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_and(supported, supported, mask);
+	linkmode_and(state->advertising, state->advertising, mask);
 }
 
 static int
|
|||||||
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
|
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
|
||||||
dsa_fdb_dump_cb_t *cb, void *data)
|
dsa_fdb_dump_cb_t *cb, void *data)
|
||||||
{
|
{
|
||||||
|
struct dsa_port *dp = dsa_to_port(ds, port);
|
||||||
struct sja1105_private *priv = ds->priv;
|
struct sja1105_private *priv = ds->priv;
|
||||||
struct device *dev = ds->dev;
|
struct device *dev = ds->dev;
|
||||||
int i;
|
int i;
|
||||||
@ -1802,7 +1874,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
|
|||||||
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
|
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
|
||||||
|
|
||||||
/* We need to hide the dsa_8021q VLANs from the user. */
|
/* We need to hide the dsa_8021q VLANs from the user. */
|
||||||
if (!priv->vlan_aware)
|
if (!dsa_port_is_vlan_filtering(dp))
|
||||||
l2_lookup.vlanid = 0;
|
l2_lookup.vlanid = 0;
|
||||||
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
|
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
|
||||||
if (rc)
|
if (rc)
|
||||||
@ -1908,7 +1980,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
|
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
|
||||||
struct net_device *br, bool member)
|
struct dsa_bridge bridge, bool member)
|
||||||
{
|
{
|
||||||
struct sja1105_l2_forwarding_entry *l2_fwd;
|
struct sja1105_l2_forwarding_entry *l2_fwd;
|
||||||
struct sja1105_private *priv = ds->priv;
|
struct sja1105_private *priv = ds->priv;
|
||||||
@ -1933,7 +2005,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
|
|||||||
*/
|
*/
|
||||||
if (i == port)
|
if (i == port)
|
||||||
continue;
|
continue;
|
||||||
if (dsa_to_port(ds, i)->bridge_dev != br)
|
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
|
||||||
continue;
|
continue;
|
||||||
sja1105_port_allow_traffic(l2_fwd, i, port, member);
|
sja1105_port_allow_traffic(l2_fwd, i, port, member);
|
||||||
sja1105_port_allow_traffic(l2_fwd, port, i, member);
|
sja1105_port_allow_traffic(l2_fwd, port, i, member);
|
||||||
@ -2002,15 +2074,31 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
|
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
|
||||||
struct net_device *br)
|
struct dsa_bridge bridge,
|
||||||
|
bool *tx_fwd_offload)
|
||||||
{
|
{
|
||||||
return sja1105_bridge_member(ds, port, br, true);
|
int rc;
|
||||||
|
|
||||||
|
rc = sja1105_bridge_member(ds, port, bridge, true);
|
||||||
|
if (rc)
|
||||||
|
return rc;
|
||||||
|
|
||||||
|
rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge);
|
||||||
|
if (rc) {
|
||||||
|
sja1105_bridge_member(ds, port, bridge, false);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
*tx_fwd_offload = true;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
|
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
|
||||||
struct net_device *br)
|
struct dsa_bridge bridge)
|
||||||
{
|
{
|
||||||
sja1105_bridge_member(ds, port, br, false);
|
dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge);
|
||||||
|
sja1105_bridge_member(ds, port, bridge, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define BYTES_PER_KBIT (1000LL / 8)
|
#define BYTES_PER_KBIT (1000LL / 8)
|
||||||
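sja1105_bridge_join() above now performs two steps and unwinds the first when the second fails, so a failed join leaves no partial forwarding state behind. A standalone C sketch of that rollback pattern, with stand-in names for sja1105_bridge_member() and dsa_tag_8021q_bridge_tx_fwd_offload():

	#include <stdio.h>

	static int set_membership(int port, int join)
	{
		printf("port %d: membership %s\n", port, join ? "set" : "cleared");
		return 0;
	}

	static int offload_tx_forwarding(int port)
	{
		(void)port;
		return -1;	/* simulate failure to exercise the unwind path */
	}

	static int bridge_join(int port)
	{
		int rc;

		rc = set_membership(port, 1);
		if (rc)
			return rc;

		rc = offload_tx_forwarding(port);
		if (rc) {
			/* Undo step 1 so the caller sees all-or-nothing */
			set_membership(port, 0);
			return rc;
		}

		return 0;
	}

	int main(void)
	{
		printf("join: %d\n", bridge_join(2));
		return 0;
	}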
@@ -2295,11 +2383,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
 		tpid2 = ETH_P_SJA1105;
 	}
 
-	if (priv->vlan_aware == enabled)
-		return 0;
-
-	priv->vlan_aware = enabled;
-
 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
 	general_params = table->entries;
 	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -2332,7 +2415,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
 	 */
 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
 	l2_lookup_params = table->entries;
-	l2_lookup_params->shared_learn = !priv->vlan_aware;
+	l2_lookup_params->shared_learn = !enabled;
 
 	for (port = 0; port < ds->num_ports; port++) {
 		if (dsa_is_unused_port(ds, port))
@@ -2521,8 +2604,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
 
 	if (netif_is_bridge_master(upper)) {
 		list_for_each_entry(dp, &dst->ports, list) {
-			if (dp->bridge_dev && dp->bridge_dev != upper &&
-			    br_vlan_enabled(dp->bridge_dev)) {
+			struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+			if (br && br != upper && br_vlan_enabled(br)) {
 				NL_SET_ERR_MSG_MOD(extack,
 						   "Only one VLAN-aware bridge is supported");
 				return -EBUSY;
@@ -2533,18 +2617,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-static void sja1105_port_disable(struct dsa_switch *ds, int port)
-{
-	struct sja1105_private *priv = ds->priv;
-	struct sja1105_port *sp = &priv->ports[port];
-
-	if (!dsa_is_user_port(ds, port))
-		return;
-
-	kthread_cancel_work_sync(&sp->xmit_work);
-	skb_queue_purge(&sp->xmit_queue);
-}
-
 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
 			     struct sk_buff *skb, bool takets)
 {
@@ -2603,10 +2675,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
 	return NETDEV_TX_OK;
 }
 
-#define work_to_port(work) \
-	container_of((work), struct sja1105_port, xmit_work)
-#define tagger_to_sja1105(t) \
-	container_of((t), struct sja1105_private, tagger_data)
+#define work_to_xmit_work(w) \
+	container_of((w), struct sja1105_deferred_xmit_work, work)
 
 /* Deferred work is unfortunately necessary because setting up the management
  * route cannot be done from atomic context (SPI transfer takes a sleepable
@@ -2614,25 +2684,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
  */
 static void sja1105_port_deferred_xmit(struct kthread_work *work)
 {
-	struct sja1105_port *sp = work_to_port(work);
-	struct sja1105_tagger_data *tagger_data = sp->data;
-	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
-	int port = sp - priv->ports;
-	struct sk_buff *skb;
+	struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+	struct sk_buff *clone, *skb = xmit_work->skb;
+	struct dsa_switch *ds = xmit_work->dp->ds;
+	struct sja1105_private *priv = ds->priv;
+	int port = xmit_work->dp->index;
 
-	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
-		struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+	clone = SJA1105_SKB_CB(skb)->clone;
 
-		mutex_lock(&priv->mgmt_lock);
+	mutex_lock(&priv->mgmt_lock);
 
-		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+	sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
 
-		/* The clone, if there, was made by dsa_skb_tx_timestamp */
-		if (clone)
-			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+	/* The clone, if there, was made by dsa_skb_tx_timestamp */
+	if (clone)
+		sja1105_ptp_txtstamp_skb(ds, port, clone);
 
-		mutex_unlock(&priv->mgmt_lock);
-	}
+	mutex_unlock(&priv->mgmt_lock);
+
+	kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+					enum dsa_tag_protocol proto)
+{
+	struct sja1105_private *priv = ds->priv;
+	struct sja1105_tagger_data *tagger_data;
+
+	if (proto != priv->info->tag_proto)
+		return -EPROTONOSUPPORT;
+
+	tagger_data = sja1105_tagger_data(ds);
+	tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+	tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+	return 0;
 }
 
 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
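The deferred-xmit rework above replaces the per-port skb queue and the long-running dequeue loop with one heap-allocated work item per frame, handed to the driver through the tagger_data callbacks wired up in sja1105_connect_tag_protocol(). A standalone C sketch of that ownership model (the names loosely mirror the patch; the handler, not the submitter, frees the item):

	#include <stdio.h>
	#include <stdlib.h>

	struct xmit_work {
		int port;
		const char *frame;	/* stands in for the sk_buff */
	};

	/* Plays the role of sja1105_port_deferred_xmit(): one invocation per
	 * work item, and the item is freed here, not by the submitter. */
	static void deferred_xmit(struct xmit_work *w)
	{
		printf("port %d: xmit %s\n", w->port, w->frame);
		free(w);
	}

	/* Plays the role of the tagger: allocates a work item per frame */
	static int defer_frame(int port, const char *frame)
	{
		struct xmit_work *w = malloc(sizeof(*w));

		if (!w)
			return -1;
		w->port = port;
		w->frame = frame;
		deferred_xmit(w);	/* the kernel instead queues this to a
					 * kthread worker and runs it later */
		return 0;
	}

	int main(void)
	{
		defer_frame(0, "PTP mgmt frame");
		return 0;
	}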
@@ -2935,59 +3021,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-static void sja1105_teardown_ports(struct sja1105_private *priv)
-{
-	struct dsa_switch *ds = priv->ds;
-	int port;
-
-	for (port = 0; port < ds->num_ports; port++) {
-		struct sja1105_port *sp = &priv->ports[port];
-
-		if (sp->xmit_worker)
-			kthread_destroy_worker(sp->xmit_worker);
-	}
-}
-
-static int sja1105_setup_ports(struct sja1105_private *priv)
-{
-	struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
-	struct dsa_switch *ds = priv->ds;
-	int port, rc;
-
-	/* Connections between dsa_port and sja1105_port */
-	for (port = 0; port < ds->num_ports; port++) {
-		struct sja1105_port *sp = &priv->ports[port];
-		struct dsa_port *dp = dsa_to_port(ds, port);
-		struct kthread_worker *worker;
-		struct net_device *slave;
-
-		if (!dsa_port_is_user(dp))
-			continue;
-
-		dp->priv = sp;
-		sp->dp = dp;
-		sp->data = tagger_data;
-		slave = dp->slave;
-		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
-		worker = kthread_create_worker(0, "%s_xmit", slave->name);
-		if (IS_ERR(worker)) {
-			rc = PTR_ERR(worker);
-			dev_err(ds->dev,
-				"failed to create deferred xmit thread: %d\n",
-				rc);
-			goto out_destroy_workers;
-		}
-		sp->xmit_worker = worker;
-		skb_queue_head_init(&sp->xmit_queue);
-	}
-
-	return 0;
-
-out_destroy_workers:
-	sja1105_teardown_ports(priv);
-	return rc;
-}
-
 /* The programming model for the SJA1105 switch is "all-at-once" via static
  * configuration tables. Some of these can be dynamically modified at runtime,
  * but not the xMII mode parameters table.
@@ -3033,10 +3066,6 @@ static int sja1105_setup(struct dsa_switch *ds)
 		}
 	}
 
-	rc = sja1105_setup_ports(priv);
-	if (rc)
-		goto out_static_config_free;
-
 	sja1105_tas_setup(ds);
 	sja1105_flower_setup(ds);
 
@@ -3074,7 +3103,7 @@ static int sja1105_setup(struct dsa_switch *ds)
 	ds->vlan_filtering_is_global = true;
 	ds->untag_bridge_pvid = true;
 	/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
-	ds->num_fwd_offloading_bridges = 7;
+	ds->max_num_bridges = 7;
 
 	/* Advertise the 8 egress queues */
 	ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3093,7 +3122,6 @@ static int sja1105_setup(struct dsa_switch *ds)
 out_flower_teardown:
 	sja1105_flower_teardown(ds);
 	sja1105_tas_teardown(ds);
-	sja1105_teardown_ports(priv);
 out_static_config_free:
 	sja1105_static_config_free(&priv->static_config);
 
@@ -3113,12 +3141,12 @@ static void sja1105_teardown(struct dsa_switch *ds)
 	sja1105_ptp_clock_unregister(ds);
 	sja1105_flower_teardown(ds);
 	sja1105_tas_teardown(ds);
-	sja1105_teardown_ports(priv);
 	sja1105_static_config_free(&priv->static_config);
 }
 
 static const struct dsa_switch_ops sja1105_switch_ops = {
 	.get_tag_protocol	= sja1105_get_tag_protocol,
+	.connect_tag_protocol	= sja1105_connect_tag_protocol,
 	.setup			= sja1105_setup,
 	.teardown		= sja1105_teardown,
 	.set_ageing_time	= sja1105_set_ageing_time,
@@ -3132,7 +3160,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
 	.get_ethtool_stats	= sja1105_get_ethtool_stats,
 	.get_sset_count		= sja1105_get_sset_count,
 	.get_ts_info		= sja1105_get_ts_info,
-	.port_disable		= sja1105_port_disable,
 	.port_fdb_dump		= sja1105_fdb_dump,
 	.port_fdb_add		= sja1105_fdb_add,
 	.port_fdb_del		= sja1105_fdb_del,
@@ -3163,8 +3190,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
 	.tag_8021q_vlan_add	= sja1105_dsa_8021q_vlan_add,
 	.tag_8021q_vlan_del	= sja1105_dsa_8021q_vlan_del,
 	.port_prechangeupper	= sja1105_prechangeupper,
-	.port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
-	.port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
 };
 
 static const struct of_device_id sja1105_dt_ids[];
@@ -3229,17 +3254,14 @@ static int sja1105_probe(struct spi_device *spi)
 		return -EINVAL;
 	}
 
+	rc = sja1105_hw_reset(dev, 1, 1);
+	if (rc)
+		return rc;
+
 	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	/* Configure the optional reset pin and bring up switch */
-	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(priv->reset_gpio))
-		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
-	else
-		sja1105_hw_reset(priv->reset_gpio, 1, 1);
-
 	/* Populate our driver private structure (priv) based on
 	 * the device tree node that was probed (spi)
 	 */
@@ -3303,7 +3325,9 @@ static int sja1105_probe(struct spi_device *spi)
 	priv->ds = ds;
 
 	mutex_init(&priv->ptp_data.lock);
+	mutex_init(&priv->dynamic_config_lock);
 	mutex_init(&priv->mgmt_lock);
+	spin_lock_init(&priv->ts_id_lock);
 
 	rc = sja1105_parse_dt(priv);
 	if (rc < 0) {
@@ -3311,15 +3335,6 @@ static int sja1105_probe(struct spi_device *spi)
 		return rc;
 	}
 
-	/* Error out early if internal delays are required through DT
-	 * and we can't apply them.
-	 */
-	rc = sja1105_parse_rgmii_delays(priv);
-	if (rc < 0) {
-		dev_err(ds->dev, "RGMII delay not supported\n");
-		return rc;
-	}
-
 	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
 		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
 					 sizeof(struct sja1105_cbs_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,13 +58,12 @@ enum sja1105_ptp_clk_mode {
 #define ptp_data_to_sja1105(d) \
 	container_of((d), struct sja1105_private, ptp_data)
 
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
+/* Must be called only while the RX timestamping state of the tagger
+ * is turned off
  */
 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
 				      bool on)
 {
-	struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
 	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 	struct sja1105_general_params_entry *general_params;
 	struct sja1105_table *table;
@@ -74,13 +73,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
 	general_params->send_meta1 = on;
 	general_params->send_meta0 = on;
 
-	/* Initialize the meta state machine to a known state */
-	if (priv->tagger_data.stampable_skb) {
-		kfree_skb(priv->tagger_data.stampable_skb);
-		priv->tagger_data.stampable_skb = NULL;
-	}
 	ptp_cancel_worker_sync(ptp_data->clock);
-	skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+	skb_queue_purge(&ptp_data->skb_txtstamp_queue);
 	skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
 
 	return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -88,6 +82,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
 
 int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
 {
+	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
 	struct sja1105_private *priv = ds->priv;
 	struct hwtstamp_config config;
 	bool rx_on;
@@ -98,10 +93,10 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
 
 	switch (config.tx_type) {
 	case HWTSTAMP_TX_OFF:
-		priv->ports[port].hwts_tx_en = false;
+		priv->hwts_tx_en &= ~BIT(port);
 		break;
 	case HWTSTAMP_TX_ON:
-		priv->ports[port].hwts_tx_en = true;
+		priv->hwts_tx_en |= BIT(port);
 		break;
 	default:
 		return -ERANGE;
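The hwts_tx_en conversion above turns a per-port bool into one bit of a single mask, using the usual BIT(port) idiom for set, clear, and test. A standalone demo of the three operations:

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	int main(void)
	{
		unsigned long hwts_tx_en = 0;
		int port = 3;

		hwts_tx_en |= BIT(port);	/* HWTSTAMP_TX_ON */
		printf("port %d on: %d\n", port, !!(hwts_tx_en & BIT(port)));

		hwts_tx_en &= ~BIT(port);	/* HWTSTAMP_TX_OFF */
		printf("port %d on: %d\n", port, !!(hwts_tx_en & BIT(port)));
		return 0;
	}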
@@ -116,8 +111,8 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
 		break;
 	}
 
-	if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
-		clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+	if (rx_on != tagger_data->rxtstamp_get_state(ds)) {
+		tagger_data->rxtstamp_set_state(ds, false);
 
 		rc = sja1105_change_rxtstamping(priv, rx_on);
 		if (rc < 0) {
@@ -126,7 +121,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
 			return rc;
 		}
 		if (rx_on)
-			set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+			tagger_data->rxtstamp_set_state(ds, true);
 	}
 
 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@@ -136,15 +131,16 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
 
 int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
 {
+	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
 	struct sja1105_private *priv = ds->priv;
 	struct hwtstamp_config config;
 
 	config.flags = 0;
-	if (priv->ports[port].hwts_tx_en)
+	if (priv->hwts_tx_en & BIT(port))
 		config.tx_type = HWTSTAMP_TX_ON;
 	else
 		config.tx_type = HWTSTAMP_TX_OFF;
-	if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+	if (tagger_data->rxtstamp_get_state(ds))
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 	else
 		config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -417,10 +413,11 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
 
 bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 {
+	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
 	struct sja1105_private *priv = ds->priv;
 	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 
-	if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+	if (!tagger_data->rxtstamp_get_state(ds))
 		return false;
 
 	/* We need to read the full PTP clock to reconstruct the Rx
@@ -453,6 +450,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
 	return priv->info->rxtstamp(ds, port, skb);
 }
 
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+				 enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+	struct sja1105_private *priv = ds->priv;
+	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+	struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+	struct skb_shared_hwtstamps shwt = {0};
+
+	/* We don't care about RX timestamps on the CPU port */
+	if (dir == SJA1110_META_TSTAMP_RX)
+		return;
+
+	spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+	skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+		if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+			continue;
+
+		__skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+		skb_match = skb;
+
+		break;
+	}
+
+	spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+	if (WARN_ON(!skb_match))
+		return;
+
+	shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+	skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
 /* In addition to cloning the skb which is done by the common
  * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
  * packet to the TX timestamping queue.
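sja1110_process_meta_tstamp(), added above, pairs a TX timestamp report with the queued clone whose ts_id matches, unlinking it under the queue lock before completing the timestamp. A standalone C sketch of that ID-matching walk, with a small array standing in for the sk_buff queue:

	#include <stdio.h>

	struct pending {
		unsigned char ts_id;
		int valid;
	};

	static struct pending *match_ts_id(struct pending *q, int n,
					   unsigned char ts_id)
	{
		for (int i = 0; i < n; i++) {
			if (q[i].valid && q[i].ts_id == ts_id) {
				q[i].valid = 0;	/* __skb_unlink() equivalent */
				return &q[i];
			}
		}
		return NULL;	/* would trip the WARN_ON() path in the driver */
	}

	int main(void)
	{
		struct pending queue[] = { { 7, 1 }, { 8, 1 } };
		struct pending *hit = match_ts_id(queue, 2, 8);

		printf("matched: %s\n", hit ? "yes" : "no");
		return 0;
	}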
@@ -461,22 +491,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 {
 	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
 	struct sja1105_private *priv = ds->priv;
-	struct sja1105_port *sp = &priv->ports[port];
+	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 	u8 ts_id;
 
 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
-	spin_lock(&sp->data->meta_lock);
+	spin_lock(&priv->ts_id_lock);
 
-	ts_id = sp->data->ts_id;
+	ts_id = priv->ts_id;
 	/* Deal automatically with 8-bit wraparound */
-	sp->data->ts_id++;
+	priv->ts_id++;
 
 	SJA1105_SKB_CB(clone)->ts_id = ts_id;
 
-	spin_unlock(&sp->data->meta_lock);
+	spin_unlock(&priv->ts_id_lock);
 
-	skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
+	skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
 }
 
 /* Called from dsa_skb_tx_timestamp. This callback is just to clone
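The "deal automatically with 8-bit wraparound" comment in the hunk above relies on ts_id being a u8: the counter rolls over from 255 to 0 on its own, so no masking or modulo is needed. A standalone demo:

	#include <stdio.h>

	int main(void)
	{
		unsigned char ts_id = 254;

		for (int i = 0; i < 4; i++)
			printf("ts_id = %u\n", ts_id++);	/* 254, 255, 0, 1 */
		return 0;
	}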
@@ -486,10 +516,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 {
 	struct sja1105_private *priv = ds->priv;
-	struct sja1105_port *sp = &priv->ports[port];
 	struct sk_buff *clone;
 
-	if (!sp->hwts_tx_en)
+	if (!(priv->hwts_tx_en & BIT(port)))
 		return;
 
 	clone = skb_clone_sk(skb);
@@ -896,7 +925,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = {
 int sja1105_ptp_clock_register(struct dsa_switch *ds)
 {
 	struct sja1105_private *priv = ds->priv;
-	struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
 	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 
 	ptp_data->caps = (struct ptp_clock_info) {
@@ -919,8 +947,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
 	/* Only used on SJA1105 */
 	skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
 	/* Only used on SJA1110 */
-	skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
-	spin_lock_init(&tagger_data->meta_lock);
+	skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
 
 	ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
 	if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -937,7 +964,6 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
 void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 {
 	struct sja1105_private *priv = ds->priv;
-	struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
 	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 
 	if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -945,7 +971,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 
 	del_timer_sync(&ptp_data->extts_timer);
 	ptp_cancel_worker_sync(ptp_data->clock);
-	skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+	skb_queue_purge(&ptp_data->skb_txtstamp_queue);
 	skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
 	ptp_clock_unregister(ptp_data->clock);
 	ptp_data->clock = NULL;
Some files were not shown because too many files have changed in this diff.