// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
                               struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        void *dup_buf = NULL;
        dma_addr_t phys_addr;
        int i, cmd_pos, idx;
        u16 copy_size, cmd_size, tb0_size;
        bool had_nocopy = false;
        u8 group_id = iwl_cmd_groupid(cmd->id);
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_tfh_tfd *tfd;
        unsigned long flags;

        copy_size = sizeof(struct iwl_cmd_header_wide);
        cmd_size = sizeof(struct iwl_cmd_header_wide);
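
        /*
         * First pass over the chunks: copy_size accumulates what will be
         * copied into the command buffer itself (and later DMA-mapped as
         * one region), while cmd_size counts the full logical command
         * length, including NOCOPY/DUP chunks that are mapped separately.
         */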
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                cmddata[i] = cmd->data[i];
                cmdlen[i] = cmd->len[i];

                if (!cmd->len[i])
                        continue;

                /* need at least IWL_FIRST_TB_SIZE copied */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        int copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmdlen[i])
                                copy = cmdlen[i];
                        cmdlen[i] -= copy;
                        cmddata[i] += copy;
                        copy_size += copy;
                }
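
                /*
                 * The remainder of the chunk (cmddata[i]/cmdlen[i], as
                 * adjusted above) either gets DMA-mapped in place (NOCOPY),
                 * duplicated and then mapped (DUP), or copied into the
                 * command buffer (the default case below).
                 */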
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
                        /*
                         * This is also a chunk that isn't copied
                         * to the static buffer so set had_nocopy.
                         */
                        had_nocopy = true;

                        /* only allowed once */
                        if (WARN_ON(dup_buf)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }

                        dup_buf = kmemdup(cmddata[i], cmdlen[i],
                                          GFP_ATOMIC);
                        if (!dup_buf)
                                return -ENOMEM;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                        copy_size += cmdlen[i];
                }
                cmd_size += cmd->len[i];
        }
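
        /*
         * At this point copy_size is the header plus everything that will
         * live in the command buffer, and cmd_size is the header plus the
         * total length of all chunks as advertised to the firmware.
         */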

        /*
         * If any of the command structures end up being larger than the
         * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
         * separate TFDs, then we will need to increase the size of the buffers
         */
        if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
                 "Command %s (%#x) is too large (%d bytes)\n",
                 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
                idx = -EINVAL;
                goto free_dup_buf;
        }

        spin_lock_irqsave(&txq->lock, flags);

        idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
        tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
        memset(tfd, 0, sizeof(*tfd));
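
        /*
         * Check for room under the queue lock: an async command needs two
         * free slots, a sync command one; on overflow, notify the op-mode
         * and fail with -ENOSPC.
         */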
        if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_irqrestore(&txq->lock, flags);

                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
                idx = -ENOSPC;
                goto free_dup_buf;
        }
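
        /* use the pre-allocated command buffer and metadata for this slot */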
        out_cmd = txq->entries[idx].cmd;
        out_meta = &txq->entries[idx].meta;

        /* re-initialize to NULL */
        memset(out_meta, 0, sizeof(*out_meta));
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;

        /* set up the header */
        out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
        out_cmd->hdr_wide.group_id = group_id;
        out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
        out_cmd->hdr_wide.length =
                cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
        out_cmd->hdr_wide.reserved = 0;
        out_cmd->hdr_wide.sequence =
                cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
                            INDEX_TO_SEQ(txq->write_ptr));
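
        /*
         * Second pass, now copying data: cmd_pos tracks the write offset
         * inside out_cmd, and copy_size is recomputed to cover exactly
         * what ends up in the command buffer.
         */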
        cmd_pos = sizeof(struct iwl_cmd_header_wide);
        copy_size = sizeof(struct iwl_cmd_header_wide);

        /* and copy the data that needs to be copied */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;

                if (!cmd->len[i])
                        continue;

                /* copy everything if not nocopy/dup */
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP))) {
                        copy = cmd->len[i];

                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                        cmd_pos += copy;
                        copy_size += copy;
                        continue;
                }

                /*
                 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
                 * in total (for bi-directional DMA), but copy up to what
                 * we can fit into the payload for debug dump purposes.
                 */
                copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

                memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                cmd_pos += copy;

                /* However, treat copy_size the proper way, we need it below */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmd->len[i])
                                copy = cmd->len[i];
                        copy_size += copy;
                }
        }

        IWL_DEBUG_HC(trans,
                     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     iwl_get_cmd_string(trans, cmd->id), group_id,
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

        /* start the TFD with the minimum copy bytes */
        tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
        memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
        iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
                            tb0_size);

        /* map first command fragment, if any remains */
        if (copy_size > tb0_size) {
                phys_addr = dma_map_single(trans->dev,
                                           (u8 *)out_cmd + tb0_size,
                                           copy_size - tb0_size,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
                                    copy_size - tb0_size);
        }

        /* map the remaining (adjusted) nocopy/dup fragments */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                const void *data = cmddata[i];

                if (!cmdlen[i])
                        continue;
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP)))
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
                phys_addr = dma_map_single(trans->dev, (void *)data,
                                           cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
        }
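
        /*
         * out_meta->tbs is a bitmap with one bit per TB, so it must be
         * wide enough for IWL_TFH_NUM_TBS; dup_buf is handed over to the
         * queue entry so it is freed when the command completes.
         */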
        BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kfree_sensitive(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;

        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
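
        /*
         * reg_lock serializes hardware register access while the write
         * pointer is bumped and the device is told about the new TFD.
         */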
        spin_lock(&trans_pcie->reg_lock);
        /* Increment and update queue's write index */
        txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
        iwl_txq_inc_wr_ptr(trans, txq);
        spin_unlock(&trans_pcie->reg_lock);

out:
        spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
        return idx;
}
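
/*
 * Usage sketch (illustrative only, not part of the driver): a caller in
 * the transport's command path fills a struct iwl_host_cmd and passes it
 * here; group, opcode, payload and payload_len below are placeholders.
 *
 *      struct iwl_host_cmd hcmd = {
 *              .id = WIDE_ID(group, opcode),
 *              .data = { payload, },
 *              .len = { payload_len, },
 *              .flags = CMD_WANT_SKB,
 *      };
 *      int idx = iwl_pcie_gen2_enqueue_hcmd(trans, &hcmd);
 *
 * A negative return is an error (-EINVAL, -ENOMEM or -ENOSPC, as above);
 * otherwise idx is the command's slot in the queue.
 */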