/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <net/tso.h>
#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: the transport to use
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int i, cmd_pos, idx;
u16 copy_size, cmd_size, tb0_size;
bool had_nocopy = false;
u8 group_id = iwl_cmd_groupid(cmd->id);
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_tfh_tfd *tfd;
unsigned long flags;
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
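	/*
	 * copy_size tracks the bytes that get copied into the driver's own
	 * command buffer (TB0 plus at most one mapped fragment), while
	 * cmd_size tracks the full logical command length, including
	 * NOCOPY/DUP chunks that are DMA-mapped in place.
	 */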
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
cmddata[i] = cmd->data[i];
cmdlen[i] = cmd->len[i];
if (!cmd->len[i])
continue;
/* need at least IWL_FIRST_TB_SIZE copied */
if (copy_size < IWL_FIRST_TB_SIZE) {
int copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmdlen[i])
copy = cmdlen[i];
cmdlen[i] -= copy;
cmddata[i] += copy;
copy_size += copy;
}
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
idx = -EINVAL;
goto free_dup_buf;
}
} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
/*
* This is also a chunk that isn't copied
* to the static buffer so set had_nocopy.
*/
had_nocopy = true;
/* only allowed once */
if (WARN_ON(dup_buf)) {
idx = -EINVAL;
goto free_dup_buf;
}
dup_buf = kmemdup(cmddata[i], cmdlen[i],
GFP_ATOMIC);
if (!dup_buf)
return -ENOMEM;
} else {
/* NOCOPY must not be followed by normal! */
if (WARN_ON(had_nocopy)) {
idx = -EINVAL;
goto free_dup_buf;
}
copy_size += cmdlen[i];
}
cmd_size += cmd->len[i];
}
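	/*
	 * At this point cmddata[]/cmdlen[] describe what remains of each
	 * chunk once the prefix destined for TB0 has been peeled off; the
	 * remainder of any NOCOPY/DUP chunk is DMA-mapped further below.
	 */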
	/*
	 * If any command ends up being larger than TFD_MAX_PAYLOAD_SIZE
	 * without being dynamically allocated into separate TFDs, the
	 * static buffers need to grow.
	 */
if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
"Command %s (%#x) is too large (%d bytes)\n",
iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
idx = -EINVAL;
goto free_dup_buf;
}
spin_lock_irqsave(&txq->lock, flags);
idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
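	/*
	 * An async command needs two free slots, which keeps at least one
	 * slot available for a synchronous command at any time.
	 */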
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
idx = -ENOSPC;
goto free_dup_buf;
}
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
	/* clear out any stale metadata in this slot */
memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
/* set up the header */
out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr_wide.group_id = group_id;
out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
out_cmd->hdr_wide.length =
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
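	/*
	 * The sequence field encodes the command queue id and the write
	 * index; the completion handler uses it to locate this entry when
	 * the firmware responds.
	 */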
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
/* and copy the data that needs to be copied */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy;
if (!cmd->len[i])
continue;
/* copy everything if not nocopy/dup */
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP))) {
copy = cmd->len[i];
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
cmd_pos += copy;
copy_size += copy;
continue;
}
/*
* Otherwise we need at least IWL_FIRST_TB_SIZE copied
* in total (for bi-directional DMA), but copy up to what
* we can fit into the payload for debug dump purposes.
*/
copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
cmd_pos += copy;
		/* still account for copy_size properly; it is needed for TB0 below */
if (copy_size < IWL_FIRST_TB_SIZE) {
copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmd->len[i])
copy = cmd->len[i];
copy_size += copy;
}
}
IWL_DEBUG_HC(trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
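	/*
	 * TB0 lives in txq->first_tb_bufs, a per-queue DMA-coherent area
	 * mapped when the queue was allocated, so a plain memcpy is enough
	 * and no dma_map_single() call is needed for it.
	 */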
memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
(u8 *)out_cmd + tb0_size,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
copy_size - tb0_size);
}
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
const void *data = cmddata[i];
if (!cmdlen[i])
continue;
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP)))
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
}
BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kfree_sensitive(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
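	/*
	 * The entry now owns dup_buf; it is released (kfree_sensitive) when
	 * the slot is reused or the queue is freed.
	 */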
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock(&trans_pcie->reg_lock);
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_txq_inc_wr_ptr(trans, txq);
spin_unlock(&trans_pcie->reg_lock);
out:
spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
return idx;
}
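
/* how long to wait for the firmware to answer a synchronous command */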
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status),
"Command %s: a command is already active!\n", cmd_str))
return -EIO;
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
cmd_str, ret);
return ret;
}
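	/*
	 * Block until the completion path clears STATUS_SYNC_HCMD_ACTIVE
	 * and wakes wait_command_queue, or until the timeout expires.
	 */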
ret = wait_event_timeout(trans_pcie->wait_command_queue,
!test_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
cmd_str);
ret = -ETIMEDOUT;
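		/*
		 * Fire an NMI at the firmware so it reports its state, to
		 * help debug the stuck command.
		 */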
iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
ret = -EIO;
goto cancel;
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
}
if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
ret = -EIO;
goto cancel;
}
return 0;
cancel:
if (cmd->flags & CMD_WANT_SKB) {
/*
* Cancel the CMD_WANT_SKB flag for the cmd in the
* TX cmd queue. Otherwise in case the cmd comes
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
}
if (cmd->resp_pkt) {
iwl_free_resp(cmd);
cmd->resp_pkt = NULL;
}
return ret;
}
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
}
if (cmd->flags & CMD_ASYNC) {
int ret;
		/* An asynchronous command cannot expect an SKB to be set. */
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
if (ret < 0) {
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
iwl_get_cmd_string(trans, cmd->id), ret);
return ret;
}
return 0;
}
return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
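
/*
 * Usage sketch (illustrative only, not part of this file): a caller in
 * the op-mode typically fills a struct iwl_host_cmd and submits it via
 * iwl_trans_send_cmd(), which dispatches here on gen2 hardware. The
 * "group", "opcode" and "payload" names below are hypothetical:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(group, opcode),
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 * With .flags = 0 the call is synchronous; CMD_ASYNC returns right after
 * enqueueing, and CMD_WANT_SKB (sync only, see the WARN_ON above) makes
 * the response packet available in hcmd.resp_pkt.
 */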