|  | // SPDX-License-Identifier: GPL-2.0-only | 
|  | /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ | 
|  | #include <uapi/linux/cxl_mem.h> | 
|  | #include <linux/security.h> | 
|  | #include <linux/debugfs.h> | 
|  | #include <linux/module.h> | 
|  | #include <linux/sizes.h> | 
|  | #include <linux/mutex.h> | 
|  | #include <linux/list.h> | 
|  | #include <linux/cdev.h> | 
|  | #include <linux/idr.h> | 
|  | #include <linux/pci.h> | 
|  | #include <linux/io.h> | 
|  | #include <linux/io-64-nonatomic-lo-hi.h> | 
|  | #include "cxlmem.h" | 
|  | #include "pci.h" | 
|  | #include "cxl.h" | 
|  |  | 
|  | /** | 
|  | * DOC: cxl pci | 
|  | * | 
|  | * This implements the PCI exclusive functionality for a CXL device as it is | 
|  | * defined by the Compute Express Link specification. CXL devices may surface | 
|  | * certain functionality even when they are not CXL enabled. | 
|  | * | 
|  | * The driver has several responsibilities, mainly: | 
|  | *  - Create the memX device and register on the CXL bus. | 
|  | *  - Enumerate the device's register blocks and map them. | 
|  | *  - Probe the device attributes to establish the sysfs interface. | 
|  | *  - Provide an IOCTL interface to userspace to communicate with the device for | 
|  | *    things like firmware update. | 
|  | */ | 
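|  |  | 
|  | /* | 
|  | * Example: a minimal userspace sketch of the query half of the IOCTL | 
|  | * interface described above. The device path, buffer handling, and lack of | 
|  | * error checking are illustrative assumptions; only the ioctl number and the | 
|  | * struct cxl_mem_query_commands layout come from <uapi/linux/cxl_mem.h>: | 
|  | * | 
|  | *	#include <linux/cxl_mem.h> | 
|  | *	#include <sys/ioctl.h> | 
|  | *	#include <fcntl.h> | 
|  | *	#include <stdlib.h> | 
|  | * | 
|  | *	int fd = open("/dev/cxl/mem0", O_RDWR);	// path is an assumption | 
|  | *	struct cxl_mem_query_commands *q = calloc(1, sizeof(*q)); | 
|  | *	__u32 n; | 
|  | * | 
|  | *	// Pass 1: n_commands == 0 asks the driver for the total command count. | 
|  | *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q); | 
|  | *	n = q->n_commands; | 
|  | * | 
|  | *	// Pass 2: provide room for n entries; the driver fills in the info. | 
|  | *	q = realloc(q, sizeof(*q) + n * sizeof(q->commands[0])); | 
|  | *	q->n_commands = n; | 
|  | *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q); | 
|  | */ | 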
|  |  | 
|  | #define cxl_doorbell_busy(cxlm)                                                \ | 
|  | (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \ | 
|  | CXLDEV_MBOX_CTRL_DOORBELL) | 
|  |  | 
|  | /* CXL 2.0 - 8.2.8.4; note the value is in jiffies despite the _MS suffix */ | 
|  | #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) | 
|  |  | 
|  | enum opcode { | 
|  | CXL_MBOX_OP_INVALID		= 0x0000, | 
|  | CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID, | 
|  | CXL_MBOX_OP_GET_FW_INFO		= 0x0200, | 
|  | CXL_MBOX_OP_ACTIVATE_FW		= 0x0202, | 
|  | CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400, | 
|  | CXL_MBOX_OP_GET_LOG		= 0x0401, | 
|  | CXL_MBOX_OP_IDENTIFY		= 0x4000, | 
|  | CXL_MBOX_OP_GET_PARTITION_INFO	= 0x4100, | 
|  | CXL_MBOX_OP_SET_PARTITION_INFO	= 0x4101, | 
|  | CXL_MBOX_OP_GET_LSA		= 0x4102, | 
|  | CXL_MBOX_OP_SET_LSA		= 0x4103, | 
|  | CXL_MBOX_OP_GET_HEALTH_INFO	= 0x4200, | 
|  | CXL_MBOX_OP_GET_ALERT_CONFIG	= 0x4201, | 
|  | CXL_MBOX_OP_SET_ALERT_CONFIG	= 0x4202, | 
|  | CXL_MBOX_OP_GET_SHUTDOWN_STATE	= 0x4203, | 
|  | CXL_MBOX_OP_SET_SHUTDOWN_STATE	= 0x4204, | 
|  | CXL_MBOX_OP_GET_POISON		= 0x4300, | 
|  | CXL_MBOX_OP_INJECT_POISON	= 0x4301, | 
|  | CXL_MBOX_OP_CLEAR_POISON	= 0x4302, | 
|  | CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303, | 
|  | CXL_MBOX_OP_SCAN_MEDIA		= 0x4304, | 
|  | CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305, | 
|  | CXL_MBOX_OP_MAX			= 0x10000 | 
|  | }; | 
|  |  | 
|  | /* | 
|  | * CXL 2.0 - Memory capacity multiplier | 
|  | * See Section 8.2.9.5 | 
|  | * | 
|  | * Volatile, Persistent, and Partition capacities are specified to be in | 
|  | * multiples of 256MB - define a multiplier to convert to/from bytes. | 
|  | */ | 
|  | #define CXL_CAPACITY_MULTIPLIER SZ_256M | 
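|  |  | 
|  | /* | 
|  | * Worked example: a device reporting a raw capacity field of 0x10 is | 
|  | * advertising 16 * SZ_256M = 4GB, which is how cxl_mem_identify() and | 
|  | * cxl_mem_get_partition_info() below apply this multiplier. | 
|  | */ | 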
|  |  | 
|  | /** | 
|  | * struct mbox_cmd - A command to be submitted to hardware. | 
|  | * @opcode: (input) The command set and command submitted to hardware. | 
|  | * @payload_in: (input) Pointer to the input payload. | 
|  | * @payload_out: (output) Pointer to the output payload. Must be allocated by | 
|  | *		 the caller. | 
|  | * @size_in: (input) Number of bytes to load from @payload_in. | 
|  | * @size_out: (input) Max number of bytes loaded into @payload_out. | 
|  | *            (output) Number of bytes generated by the device. For fixed size | 
|  | *            output commands this is always expected to be deterministic. For | 
|  | *            variable sized output commands, it tells the exact number of bytes | 
|  | *            written. | 
|  | * @return_code: (output) Error code returned from hardware. | 
|  | * | 
|  | * This is the primary mechanism used to send commands to the hardware. | 
|  | * All the fields except @payload_* correspond exactly to the fields described in | 
|  | * the Command Register section of CXL 2.0 8.2.8.4.5. @payload_in and | 
|  | * @payload_out are written to, and read from the Command Payload Registers | 
|  | * defined in CXL 2.0 8.2.8.4.8. | 
|  | */ | 
|  | struct mbox_cmd { | 
|  | u16 opcode; | 
|  | void *payload_in; | 
|  | void *payload_out; | 
|  | size_t size_in; | 
|  | size_t size_out; | 
|  | u16 return_code; | 
|  | #define CXL_MBOX_SUCCESS 0 | 
|  | }; | 
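|  |  | 
|  | /* | 
|  | * Illustrative sketch only (not a caller that exists in this file): a | 
|  | * fixed-size command such as GET_HEALTH_INFO would be built and submitted | 
|  | * roughly as follows, with the 0x12 byte output size taken from the command | 
|  | * table below: | 
|  | * | 
|  | *	u8 health[0x12]; | 
|  | *	struct mbox_cmd mbox_cmd = { | 
|  | *		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO, | 
|  | *		.payload_out = health, | 
|  | *		.size_out = sizeof(health), | 
|  | *	}; | 
|  | * | 
|  | *	rc = cxl_mem_mbox_get(cxlm);	// acquire exclusive mailbox access | 
|  | *	if (rc == 0) { | 
|  | *		rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); | 
|  | *		cxl_mem_mbox_put(cxlm); | 
|  | *	} | 
|  | *	// Even when rc == 0, mbox_cmd.return_code must still be checked; the | 
|  | *	// transport succeeding does not mean the device accepted the command. | 
|  | */ | 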
|  |  | 
|  | static DECLARE_RWSEM(cxl_memdev_rwsem); | 
|  | static struct dentry *cxl_debugfs; | 
|  | static bool cxl_raw_allow_all; | 
|  |  | 
|  | enum { | 
|  | CEL_UUID, | 
|  | VENDOR_DEBUG_UUID, | 
|  | }; | 
|  |  | 
|  | /* See CXL 2.0 Table 170. Get Log Input Payload */ | 
|  | static const uuid_t log_uuid[] = { | 
|  | [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, | 
|  | 0xb1, 0x62, 0x3b, 0x3f, 0x17), | 
|  | [VENDOR_DEBUG_UUID] = UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, | 
|  | 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), | 
|  | }; | 
|  |  | 
|  | /** | 
|  | * struct cxl_mem_command - Driver representation of a memory device command | 
|  | * @info: Command information as it exists for the UAPI | 
|  | * @opcode: The actual bits used for the mailbox protocol | 
|  | * @flags: Set of flags affecting driver behavior. | 
|  | * | 
|  | *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag | 
|  | *    will be enabled by the driver regardless of what hardware may have | 
|  | *    advertised. | 
|  | * | 
|  | * The cxl_mem_command is the driver's internal representation of commands that | 
|  | * are supported by the driver. Some of these commands may not be supported by | 
|  | * the hardware. The driver will use @info to validate the fields passed in by | 
|  | * the user then submit the @opcode to the hardware. | 
|  | * | 
|  | * See struct cxl_command_info. | 
|  | */ | 
|  | struct cxl_mem_command { | 
|  | struct cxl_command_info info; | 
|  | enum opcode opcode; | 
|  | u32 flags; | 
|  | #define CXL_CMD_FLAG_NONE 0 | 
|  | #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) | 
|  | }; | 
|  |  | 
|  | #define CXL_CMD(_id, sin, sout, _flags)                                        \ | 
|  | [CXL_MEM_COMMAND_ID_##_id] = {                                         \ | 
|  | .info =	{                                                              \ | 
|  | .id = CXL_MEM_COMMAND_ID_##_id,                        \ | 
|  | .size_in = sin,                                        \ | 
|  | .size_out = sout,                                      \ | 
|  | },                                                             \ | 
|  | .opcode = CXL_MBOX_OP_##_id,                                           \ | 
|  | .flags = _flags,                                                       \ | 
|  | } | 
|  |  | 
|  | /* | 
|  | * This table defines the supported mailbox commands for the driver. Each entry | 
|  | * embeds the UAPI struct cxl_command_info. Non-negative size values in the | 
|  | * table are validated against the user's input. For example, if size_in is | 
|  | * 0 and the user passed in 1, it is an error. | 
|  | */ | 
|  | static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = { | 
|  | CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), | 
|  | #ifdef CONFIG_CXL_MEM_RAW_COMMANDS | 
|  | CXL_CMD(RAW, ~0, ~0, 0), | 
|  | #endif | 
|  | CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), | 
|  | CXL_CMD(GET_FW_INFO, 0, 0x50, 0), | 
|  | CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), | 
|  | CXL_CMD(GET_LSA, 0x8, ~0, 0), | 
|  | CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), | 
|  | CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), | 
|  | CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), | 
|  | CXL_CMD(SET_LSA, ~0, 0, 0), | 
|  | CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), | 
|  | CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), | 
|  | CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), | 
|  | CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), | 
|  | CXL_CMD(GET_POISON, 0x10, ~0, 0), | 
|  | CXL_CMD(INJECT_POISON, 0x8, 0, 0), | 
|  | CXL_CMD(CLEAR_POISON, 0x48, 0, 0), | 
|  | CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), | 
|  | CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), | 
|  | CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), | 
|  | }; | 
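|  |  | 
|  | /* | 
|  | * For example, per the table above, GET_LSA is declared with an exact 0x8 byte | 
|  | * input size and a ~0 output size. The ~0 lands in the signed UAPI size field | 
|  | * as -1, which the "< 0" / ">= 0" checks in cxl_validate_cmd_from_user() and | 
|  | * cxl_send_cmd() treat as "variable sized", exempting it from the exact-size | 
|  | * match required of the input. | 
|  | */ | 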
|  |  | 
|  | /* | 
|  | * Commands that RAW doesn't permit. The rationale for each: | 
|  | * | 
|  | * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / | 
|  | * coordination of transaction timeout values at the root bridge level. | 
|  | * | 
|  | * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live | 
|  | * and needs to be coordinated with HDM updates. | 
|  | * | 
|  | * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the | 
|  | * driver and any writes from userspace invalidate those contents. | 
|  | * | 
|  | * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes | 
|  | * to the device after it is marked clean; userspace cannot make that | 
|  | * assertion. | 
|  | * | 
|  | * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that | 
|  | * is kept up to date with patrol notifications and error management. | 
|  | */ | 
|  | static u16 cxl_disabled_raw_commands[] = { | 
|  | CXL_MBOX_OP_ACTIVATE_FW, | 
|  | CXL_MBOX_OP_SET_PARTITION_INFO, | 
|  | CXL_MBOX_OP_SET_LSA, | 
|  | CXL_MBOX_OP_SET_SHUTDOWN_STATE, | 
|  | CXL_MBOX_OP_SCAN_MEDIA, | 
|  | CXL_MBOX_OP_GET_SCAN_MEDIA, | 
|  | }; | 
|  |  | 
|  | /* | 
|  | * Command sets that RAW doesn't permit. All opcodes in this set are | 
|  | * disabled because they pass plain text security payloads over the | 
|  | * user/kernel boundary. This functionality is intended to be wrapped | 
|  | * behind the keys ABI which allows for encrypted payloads in the UAPI | 
|  | */ | 
|  | static u8 security_command_sets[] = { | 
|  | 0x44, /* Sanitize */ | 
|  | 0x45, /* Persistent Memory Data-at-rest Security */ | 
|  | 0x46, /* Security Passthrough */ | 
|  | }; | 
|  |  | 
|  | #define cxl_for_each_cmd(cmd)                                                  \ | 
|  | for ((cmd) = &mem_commands[0];                                         \ | 
|  | ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++) | 
|  |  | 
|  | #define cxl_cmd_count ARRAY_SIZE(mem_commands) | 
|  |  | 
|  | static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) | 
|  | { | 
|  | const unsigned long start = jiffies; | 
|  | unsigned long end = start; | 
|  |  | 
|  | while (cxl_doorbell_busy(cxlm)) { | 
|  | end = jiffies; | 
|  |  | 
|  | if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { | 
|  | /* Check again in case preempted before timeout test */ | 
|  | if (!cxl_doorbell_busy(cxlm)) | 
|  | break; | 
|  | return -ETIMEDOUT; | 
|  | } | 
|  | cpu_relax(); | 
|  | } | 
|  |  | 
|  | dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms", | 
|  | jiffies_to_msecs(end) - jiffies_to_msecs(start)); | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static bool cxl_is_security_command(u16 opcode) | 
|  | { | 
|  | int i; | 
|  |  | 
|  | for (i = 0; i < ARRAY_SIZE(security_command_sets); i++) | 
|  | if (security_command_sets[i] == (opcode >> 8)) | 
|  | return true; | 
|  | return false; | 
|  | } | 
|  |  | 
|  | static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, | 
|  | struct mbox_cmd *mbox_cmd) | 
|  | { | 
|  | struct device *dev = &cxlm->pdev->dev; | 
|  |  | 
|  | dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", | 
|  | mbox_cmd->opcode, mbox_cmd->size_in); | 
|  | } | 
|  |  | 
|  | /** | 
|  | * __cxl_mem_mbox_send_cmd() - Execute a mailbox command | 
|  | * @cxlm: The CXL memory device to communicate with. | 
|  | * @mbox_cmd: Command to send to the memory device. | 
|  | * | 
|  | * Context: Any context. Expects mbox_mutex to be held. | 
|  | * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. | 
|  | *         Caller should check the return code in @mbox_cmd to make sure it | 
|  | *         succeeded. | 
|  | * | 
|  | * This is a generic form of the CXL mailbox send command, thus it only uses the | 
|  | * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory | 
|  | * devices, and perhaps other types of CXL devices, may have further information | 
|  | * available upon error conditions. Driver facilities wishing to send mailbox | 
|  | * commands should use the cxl_mem_mbox_send_cmd() wrapper. | 
|  | * | 
|  | * The CXL spec allows for up to two mailboxes. The intention is for the primary | 
|  | * mailbox to be OS controlled and the secondary mailbox to be used by system | 
|  | * firmware. This allows the OS and firmware to communicate with the device and | 
|  | * not need to coordinate with each other. The driver only uses the primary | 
|  | * mailbox. | 
|  | */ | 
|  | static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, | 
|  | struct mbox_cmd *mbox_cmd) | 
|  | { | 
|  | void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; | 
|  | u64 cmd_reg, status_reg; | 
|  | size_t out_len; | 
|  | int rc; | 
|  |  | 
|  | lockdep_assert_held(&cxlm->mbox_mutex); | 
|  |  | 
|  | /* | 
|  | * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. | 
|  | *   1. Caller reads MB Control Register to verify doorbell is clear | 
|  | *   2. Caller writes Command Register | 
|  | *   3. Caller writes Command Payload Registers if input payload is non-empty | 
|  | *   4. Caller writes MB Control Register to set doorbell | 
|  | *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured | 
|  | *   6. Caller reads MB Status Register to fetch Return code | 
|  | *   7. If command successful, Caller reads Command Register to get Payload Length | 
|  | *   8. If output payload is non-empty, host reads Command Payload Registers | 
|  | * | 
|  | * Hardware is free to do whatever it wants before the doorbell is rung, | 
|  | * and isn't allowed to change anything after it clears the doorbell. As | 
|  | * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can | 
|  | * also happen in any order (though some orders might not make sense). | 
|  | */ | 
|  |  | 
|  | /* #1 */ | 
|  | if (cxl_doorbell_busy(cxlm)) { | 
|  | dev_err_ratelimited(&cxlm->pdev->dev, | 
|  | "Mailbox re-busy after acquiring\n"); | 
|  | return -EBUSY; | 
|  | } | 
|  |  | 
|  | cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, | 
|  | mbox_cmd->opcode); | 
|  | if (mbox_cmd->size_in) { | 
|  | if (WARN_ON(!mbox_cmd->payload_in)) | 
|  | return -EINVAL; | 
|  |  | 
|  | cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, | 
|  | mbox_cmd->size_in); | 
|  | memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in); | 
|  | } | 
|  |  | 
|  | /* #2, #3 */ | 
|  | writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); | 
|  |  | 
|  | /* #4 */ | 
|  | dev_dbg(&cxlm->pdev->dev, "Sending command\n"); | 
|  | writel(CXLDEV_MBOX_CTRL_DOORBELL, | 
|  | cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); | 
|  |  | 
|  | /* #5 */ | 
|  | rc = cxl_mem_wait_for_doorbell(cxlm); | 
|  | if (rc == -ETIMEDOUT) { | 
|  | cxl_mem_mbox_timeout(cxlm, mbox_cmd); | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | /* #6 */ | 
|  | status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET); | 
|  | mbox_cmd->return_code = | 
|  | FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); | 
|  |  | 
|  | if (mbox_cmd->return_code != 0) { | 
|  | dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n"); | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | /* #7 */ | 
|  | cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); | 
|  | out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg); | 
|  |  | 
|  | /* #8 */ | 
|  | if (out_len && mbox_cmd->payload_out) { | 
|  | /* | 
|  | * Sanitize the copy. If hardware misbehaves, out_len per the | 
|  | * spec can actually be greater than the max allowed size (21 | 
|  | * bits available but spec defined 1M max). The caller also may | 
|  | * have requested less data than the hardware supplied even | 
|  | * within spec. | 
|  | */ | 
|  | size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len); | 
|  |  | 
|  | memcpy_fromio(mbox_cmd->payload_out, payload, n); | 
|  | mbox_cmd->size_out = n; | 
|  | } else { | 
|  | mbox_cmd->size_out = 0; | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox. | 
|  | * @cxlm: The memory device to gain access to. | 
|  | * | 
|  | * Context: Any context. Takes the mbox_mutex. | 
|  | * Return: 0 if exclusive access was acquired. | 
|  | */ | 
|  | static int cxl_mem_mbox_get(struct cxl_mem *cxlm) | 
|  | { | 
|  | struct device *dev = &cxlm->pdev->dev; | 
|  | u64 md_status; | 
|  | int rc; | 
|  |  | 
|  | mutex_lock_io(&cxlm->mbox_mutex); | 
|  |  | 
|  | /* | 
|  | * XXX: There is some amount of ambiguity in the 2.0 version of the spec | 
|  | * around the mailbox interface ready (8.2.8.5.1.1).  The purpose of the | 
|  | * bit is to allow firmware running on the device to notify the driver | 
|  | * that it's ready to receive commands. It is unclear if the bit needs | 
|  | * to be read for each mailbox transaction, i.e. the firmware can switch | 
|  | * it on and off as needed. Second, there is no defined timeout for | 
|  | * mailbox ready, like there is for the doorbell interface. | 
|  | * | 
|  | * Assumptions: | 
|  | * 1. The firmware might toggle the Mailbox Interface Ready bit, check | 
|  | *    it for every command. | 
|  | * | 
|  | * 2. If the doorbell is clear, the firmware should have first set the | 
|  | *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell | 
|  | *    to be clear is sufficient. | 
|  | */ | 
|  | rc = cxl_mem_wait_for_doorbell(cxlm); | 
|  | if (rc) { | 
|  | dev_warn(dev, "Mailbox interface not ready\n"); | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET); | 
|  | if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) { | 
|  | dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n"); | 
|  | rc = -EBUSY; | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | /* | 
|  | * Hardware shouldn't allow a ready status but also have failure bits | 
|  | * set. Spit out an error; this should be a bug report. | 
|  | */ | 
|  | rc = -EFAULT; | 
|  | if (md_status & CXLMDEV_DEV_FATAL) { | 
|  | dev_err(dev, "mbox: reported ready, but fatal\n"); | 
|  | goto out; | 
|  | } | 
|  | if (md_status & CXLMDEV_FW_HALT) { | 
|  | dev_err(dev, "mbox: reported ready, but halted\n"); | 
|  | goto out; | 
|  | } | 
|  | if (CXLMDEV_RESET_NEEDED(md_status)) { | 
|  | dev_err(dev, "mbox: reported ready, but reset needed\n"); | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | /* with lock held */ | 
|  | return 0; | 
|  |  | 
|  | out: | 
|  | mutex_unlock(&cxlm->mbox_mutex); | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_mbox_put() - Release exclusive access to the mailbox. | 
|  | * @cxlm: The CXL memory device to communicate with. | 
|  | * | 
|  | * Context: Any context. Expects mbox_mutex to be held. | 
|  | */ | 
|  | static void cxl_mem_mbox_put(struct cxl_mem *cxlm) | 
|  | { | 
|  | mutex_unlock(&cxlm->mbox_mutex); | 
|  | } | 
|  |  | 
|  | /** | 
|  | * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. | 
|  | * @cxlm: The CXL memory device to communicate with. | 
|  | * @cmd: The validated command. | 
|  | * @in_payload: Pointer to userspace's input payload. | 
|  | * @out_payload: Pointer to userspace's output payload. | 
|  | * @size_out: (Input) Max payload size to copy out. | 
|  | *            (Output) Payload size hardware generated. | 
|  | * @retval: Hardware generated return code from the operation. | 
|  | * | 
|  | * Return: | 
|  | *  * %0	- Mailbox transaction succeeded. This implies the mailbox | 
|  | *		  protocol completed successfully, not that the operation itself | 
|  | *		  was successful. | 
|  | *  * %-ENOMEM  - Couldn't allocate a bounce buffer. | 
|  | *  * %-EFAULT	- Something happened with copy_to/from_user. | 
|  | *  * %-EINTR	- Mailbox acquisition interrupted. | 
|  | *  * %-EXXX	- Transaction level failures. | 
|  | * | 
|  | * Creates the appropriate mailbox command and dispatches it on behalf of a | 
|  | * userspace request. The input and output payloads are copied to and from | 
|  | * userspace. | 
|  | * | 
|  | * See cxl_send_cmd(). | 
|  | */ | 
|  | static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, | 
|  | const struct cxl_mem_command *cmd, | 
|  | u64 in_payload, u64 out_payload, | 
|  | s32 *size_out, u32 *retval) | 
|  | { | 
|  | struct device *dev = &cxlm->pdev->dev; | 
|  | struct mbox_cmd mbox_cmd = { | 
|  | .opcode = cmd->opcode, | 
|  | .size_in = cmd->info.size_in, | 
|  | .size_out = cmd->info.size_out, | 
|  | }; | 
|  | int rc; | 
|  |  | 
|  | if (cmd->info.size_out) { | 
|  | mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); | 
|  | if (!mbox_cmd.payload_out) | 
|  | return -ENOMEM; | 
|  | } | 
|  |  | 
|  | if (cmd->info.size_in) { | 
|  | mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), | 
|  | cmd->info.size_in); | 
|  | if (IS_ERR(mbox_cmd.payload_in)) { | 
|  | kvfree(mbox_cmd.payload_out); | 
|  | return PTR_ERR(mbox_cmd.payload_in); | 
|  | } | 
|  | } | 
|  |  | 
|  | rc = cxl_mem_mbox_get(cxlm); | 
|  | if (rc) | 
|  | goto out; | 
|  |  | 
|  | dev_dbg(dev, | 
|  | "Submitting %s command for user\n" | 
|  | "\topcode: %x\n" | 
|  | "\tsize: %ub\n", | 
|  | cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, | 
|  | cmd->info.size_in); | 
|  |  | 
|  | dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, | 
|  | "raw command path used\n"); | 
|  |  | 
|  | rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); | 
|  | cxl_mem_mbox_put(cxlm); | 
|  | if (rc) | 
|  | goto out; | 
|  |  | 
|  | /* | 
|  | * @size_out contains the max size that's allowed to be written back out | 
|  | * to userspace. While the device may have written more output than | 
|  | * this, the excess is ignored. | 
|  | */ | 
|  | if (mbox_cmd.size_out) { | 
|  | dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, | 
|  | "Invalid return size\n"); | 
|  | if (copy_to_user(u64_to_user_ptr(out_payload), | 
|  | mbox_cmd.payload_out, mbox_cmd.size_out)) { | 
|  | rc = -EFAULT; | 
|  | goto out; | 
|  | } | 
|  | } | 
|  |  | 
|  | *size_out = mbox_cmd.size_out; | 
|  | *retval = mbox_cmd.return_code; | 
|  |  | 
|  | out: | 
|  | kvfree(mbox_cmd.payload_in); | 
|  | kvfree(mbox_cmd.payload_out); | 
|  | return rc; | 
|  | } | 
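|  |  | 
|  | /* | 
|  | * A hedged userspace sketch of driving the path above via CXL_MEM_SEND_COMMAND. | 
|  | * The open file descriptor on a /dev/cxl/memX character device, the "buf" | 
|  | * output buffer, and the choice of GET_FW_INFO (no input, 0x50 byte output per | 
|  | * the command table) are illustrative assumptions: | 
|  | * | 
|  | *	struct cxl_send_command send = { | 
|  | *		.id = CXL_MEM_COMMAND_ID_GET_FW_INFO, | 
|  | *		.out.size = 0x50, | 
|  | *		.out.payload = (__u64)(uintptr_t)buf, | 
|  | *	}; | 
|  | * | 
|  | *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &send) == 0) { | 
|  | *		// send.retval carries the device's mailbox return code and | 
|  | *		// send.out.size the number of bytes actually written to buf. | 
|  | *	} | 
|  | */ | 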
|  |  | 
|  | static bool cxl_mem_raw_command_allowed(u16 opcode) | 
|  | { | 
|  | int i; | 
|  |  | 
|  | if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) | 
|  | return false; | 
|  |  | 
|  | if (security_locked_down(LOCKDOWN_PCI_ACCESS)) | 
|  | return false; | 
|  |  | 
|  | if (cxl_raw_allow_all) | 
|  | return true; | 
|  |  | 
|  | if (cxl_is_security_command(opcode)) | 
|  | return false; | 
|  |  | 
|  | for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) | 
|  | if (cxl_disabled_raw_commands[i] == opcode) | 
|  | return false; | 
|  |  | 
|  | return true; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. | 
|  | * @cxlm: &struct cxl_mem device whose mailbox will be used. | 
|  | * @send_cmd: &struct cxl_send_command copied in from userspace. | 
|  | * @out_cmd: Sanitized and populated &struct cxl_mem_command. | 
|  | * | 
|  | * Return: | 
|  | *  * %0	- @out_cmd is ready to send. | 
|  | *  * %-ENOTTY	- Invalid command specified. | 
|  | *  * %-EINVAL	- Reserved fields or invalid values were used. | 
|  | *  * %-ENOMEM	- Input or output buffer wasn't sized properly. | 
|  | *  * %-EPERM	- Attempted to use a protected command. | 
|  | * | 
|  | * The result of this command is a fully validated command in @out_cmd that is | 
|  | * safe to send to the hardware. | 
|  | * | 
|  | * See handle_mailbox_cmd_from_user() | 
|  | */ | 
|  | static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, | 
|  | const struct cxl_send_command *send_cmd, | 
|  | struct cxl_mem_command *out_cmd) | 
|  | { | 
|  | const struct cxl_command_info *info; | 
|  | struct cxl_mem_command *c; | 
|  |  | 
|  | if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) | 
|  | return -ENOTTY; | 
|  |  | 
|  | /* | 
|  | * The user can never specify an input payload larger than what hardware | 
|  | * supports, but output can be arbitrarily large (simply write out as | 
|  | * much data as the hardware provides). | 
|  | */ | 
|  | if (send_cmd->in.size > cxlm->payload_size) | 
|  | return -EINVAL; | 
|  |  | 
|  | /* | 
|  | * Checks are bypassed for raw commands but a WARN/taint will occur | 
|  | * later in the callchain | 
|  | */ | 
|  | if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { | 
|  | const struct cxl_mem_command temp = { | 
|  | .info = { | 
|  | .id = CXL_MEM_COMMAND_ID_RAW, | 
|  | .flags = 0, | 
|  | .size_in = send_cmd->in.size, | 
|  | .size_out = send_cmd->out.size, | 
|  | }, | 
|  | .opcode = send_cmd->raw.opcode | 
|  | }; | 
|  |  | 
|  | if (send_cmd->raw.rsvd) | 
|  | return -EINVAL; | 
|  |  | 
|  | /* | 
|  | * Unlike supported commands, the output size of RAW commands | 
|  | * gets passed along without further checking, so it must be | 
|  | * validated here. | 
|  | */ | 
|  | if (send_cmd->out.size > cxlm->payload_size) | 
|  | return -EINVAL; | 
|  |  | 
|  | if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) | 
|  | return -EPERM; | 
|  |  | 
|  | memcpy(out_cmd, &temp, sizeof(temp)); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) | 
|  | return -EINVAL; | 
|  |  | 
|  | if (send_cmd->rsvd) | 
|  | return -EINVAL; | 
|  |  | 
|  | if (send_cmd->in.rsvd || send_cmd->out.rsvd) | 
|  | return -EINVAL; | 
|  |  | 
|  | /* Convert user's command into the internal representation */ | 
|  | c = &mem_commands[send_cmd->id]; | 
|  | info = &c->info; | 
|  |  | 
|  | /* Check that the command is enabled for hardware */ | 
|  | if (!test_bit(info->id, cxlm->enabled_cmds)) | 
|  | return -ENOTTY; | 
|  |  | 
|  | /* Check the input buffer is the expected size */ | 
|  | if (info->size_in >= 0 && info->size_in != send_cmd->in.size) | 
|  | return -ENOMEM; | 
|  |  | 
|  | /* Check the output buffer is at least large enough */ | 
|  | if (info->size_out >= 0 && send_cmd->out.size < info->size_out) | 
|  | return -ENOMEM; | 
|  |  | 
|  | memcpy(out_cmd, c, sizeof(*c)); | 
|  | out_cmd->info.size_in = send_cmd->in.size; | 
|  | /* | 
|  | * XXX: out_cmd->info.size_out will be controlled by the driver, and the | 
|  | * specified number of bytes @send_cmd->out.size will be copied back out | 
|  | * to userspace. | 
|  | */ | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_query_cmd(struct cxl_memdev *cxlmd, | 
|  | struct cxl_mem_query_commands __user *q) | 
|  | { | 
|  | struct device *dev = &cxlmd->dev; | 
|  | struct cxl_mem_command *cmd; | 
|  | u32 n_commands; | 
|  | int j = 0; | 
|  |  | 
|  | dev_dbg(dev, "Query IOCTL\n"); | 
|  |  | 
|  | if (get_user(n_commands, &q->n_commands)) | 
|  | return -EFAULT; | 
|  |  | 
|  | /* returns the total number if 0 elements are requested. */ | 
|  | if (n_commands == 0) | 
|  | return put_user(cxl_cmd_count, &q->n_commands); | 
|  |  | 
|  | /* | 
|  | * otherwise, return min(n_commands, total commands) cxl_command_info | 
|  | * structures. | 
|  | */ | 
|  | cxl_for_each_cmd(cmd) { | 
|  | const struct cxl_command_info *info = &cmd->info; | 
|  |  | 
|  | if (copy_to_user(&q->commands[j++], info, sizeof(*info))) | 
|  | return -EFAULT; | 
|  |  | 
|  | if (j == n_commands) | 
|  | break; | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_send_cmd(struct cxl_memdev *cxlmd, | 
|  | struct cxl_send_command __user *s) | 
|  | { | 
|  | struct cxl_mem *cxlm = cxlmd->cxlm; | 
|  | struct device *dev = &cxlmd->dev; | 
|  | struct cxl_send_command send; | 
|  | struct cxl_mem_command c; | 
|  | int rc; | 
|  |  | 
|  | dev_dbg(dev, "Send IOCTL\n"); | 
|  |  | 
|  | if (copy_from_user(&send, s, sizeof(send))) | 
|  | return -EFAULT; | 
|  |  | 
|  | rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | /* Prepare to handle a full payload for variable sized output */ | 
|  | if (c.info.size_out < 0) | 
|  | c.info.size_out = cxlm->payload_size; | 
|  |  | 
|  | rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, | 
|  | send.out.payload, &send.out.size, | 
|  | &send.retval); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | if (copy_to_user(s, &send, sizeof(send))) | 
|  | return -EFAULT; | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, | 
|  | unsigned long arg) | 
|  | { | 
|  | switch (cmd) { | 
|  | case CXL_MEM_QUERY_COMMANDS: | 
|  | return cxl_query_cmd(cxlmd, (void __user *)arg); | 
|  | case CXL_MEM_SEND_COMMAND: | 
|  | return cxl_send_cmd(cxlmd, (void __user *)arg); | 
|  | default: | 
|  | return -ENOTTY; | 
|  | } | 
|  | } | 
|  |  | 
|  | static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, | 
|  | unsigned long arg) | 
|  | { | 
|  | struct cxl_memdev *cxlmd = file->private_data; | 
|  | int rc = -ENXIO; | 
|  |  | 
|  | down_read(&cxl_memdev_rwsem); | 
|  | if (cxlmd->cxlm) | 
|  | rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); | 
|  | up_read(&cxl_memdev_rwsem); | 
|  |  | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | static int cxl_memdev_open(struct inode *inode, struct file *file) | 
|  | { | 
|  | struct cxl_memdev *cxlmd = | 
|  | container_of(inode->i_cdev, typeof(*cxlmd), cdev); | 
|  |  | 
|  | get_device(&cxlmd->dev); | 
|  | file->private_data = cxlmd; | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_memdev_release_file(struct inode *inode, struct file *file) | 
|  | { | 
|  | struct cxl_memdev *cxlmd = | 
|  | container_of(inode->i_cdev, typeof(*cxlmd), cdev); | 
|  |  | 
|  | put_device(&cxlmd->dev); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static void cxl_memdev_shutdown(struct device *dev) | 
|  | { | 
|  | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); | 
|  |  | 
|  | down_write(&cxl_memdev_rwsem); | 
|  | cxlmd->cxlm = NULL; | 
|  | up_write(&cxl_memdev_rwsem); | 
|  | } | 
|  |  | 
|  | static const struct cdevm_file_operations cxl_memdev_fops = { | 
|  | .fops = { | 
|  | .owner = THIS_MODULE, | 
|  | .unlocked_ioctl = cxl_memdev_ioctl, | 
|  | .open = cxl_memdev_open, | 
|  | .release = cxl_memdev_release_file, | 
|  | .compat_ioctl = compat_ptr_ioctl, | 
|  | .llseek = noop_llseek, | 
|  | }, | 
|  | .shutdown = cxl_memdev_shutdown, | 
|  | }; | 
|  |  | 
|  | static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) | 
|  | { | 
|  | struct cxl_mem_command *c; | 
|  |  | 
|  | cxl_for_each_cmd(c) | 
|  | if (c->opcode == opcode) | 
|  | return c; | 
|  |  | 
|  | return NULL; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. | 
|  | * @cxlm: The CXL memory device to communicate with. | 
|  | * @opcode: Opcode for the mailbox command. | 
|  | * @in: The input payload for the mailbox command. | 
|  | * @in_size: The length of the input payload | 
|  | * @out: Caller allocated buffer for the output. | 
|  | * @out_size: Expected size of output. | 
|  | * | 
|  | * Context: Any context. Will acquire and release mbox_mutex. | 
|  | * Return: | 
|  | *  * %0	- Success. | 
|  | *  * %-E2BIG	- Payload is too large for hardware. | 
|  | *  * %-EBUSY	- Couldn't acquire exclusive mailbox access. | 
|  | *  * %-EFAULT	- Hardware error occurred. | 
|  | *  * %-ENXIO	- Command completed, but device reported an error. | 
|  | *  * %-EIO	- Unexpected output size. | 
|  | * | 
|  | * Mailbox commands may execute successfully yet the device itself may report an | 
|  | * error. While this distinction can be useful for commands from userspace, the | 
|  | * kernel will only be able to use results when both are successful. | 
|  | * | 
|  | * See __cxl_mem_mbox_send_cmd() | 
|  | */ | 
|  | static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, | 
|  | void *in, size_t in_size, | 
|  | void *out, size_t out_size) | 
|  | { | 
|  | const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); | 
|  | struct mbox_cmd mbox_cmd = { | 
|  | .opcode = opcode, | 
|  | .payload_in = in, | 
|  | .size_in = in_size, | 
|  | .size_out = out_size, | 
|  | .payload_out = out, | 
|  | }; | 
|  | int rc; | 
|  |  | 
|  | if (out_size > cxlm->payload_size) | 
|  | return -E2BIG; | 
|  |  | 
|  | rc = cxl_mem_mbox_get(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); | 
|  | cxl_mem_mbox_put(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | /* TODO: Map return code to proper kernel style errno */ | 
|  | if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) | 
|  | return -ENXIO; | 
|  |  | 
|  | /* | 
|  | * Variable sized commands can't be validated and so it's up to the | 
|  | * caller to do that if they wish. | 
|  | */ | 
|  | if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) | 
|  | return -EIO; | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) | 
|  | { | 
|  | const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); | 
|  |  | 
|  | cxlm->payload_size = | 
|  | 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); | 
|  |  | 
|  | /* | 
|  | * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register | 
|  | * | 
|  | * If the size is too small, mandatory commands will not work and so | 
|  | * there's no point in going forward. If the size is too large, there's | 
|  | * no harm in soft limiting it. | 
|  | */ | 
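|  | /* | 
|  | * Worked example of the decode above: an encoded payload size field of 8 | 
|  | * yields 1 << 8 = 256 bytes (the minimum accepted below), and 20 yields the | 
|  | * 1MB soft limit applied immediately below. | 
|  | */ | 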
|  | cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); | 
|  | if (cxlm->payload_size < 256) { | 
|  | dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)", | 
|  | cxlm->payload_size); | 
|  | return -ENXIO; | 
|  | } | 
|  |  | 
|  | dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu", | 
|  | cxlm->payload_size); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) | 
|  | { | 
|  | struct device *dev = &pdev->dev; | 
|  | struct cxl_mem *cxlm; | 
|  |  | 
|  | cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); | 
|  | if (!cxlm) { | 
|  | dev_err(dev, "No memory available\n"); | 
|  | return ERR_PTR(-ENOMEM); | 
|  | } | 
|  |  | 
|  | mutex_init(&cxlm->mbox_mutex); | 
|  | cxlm->pdev = pdev; | 
|  | cxlm->enabled_cmds = | 
|  | devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), | 
|  | sizeof(unsigned long), | 
|  | GFP_KERNEL | __GFP_ZERO); | 
|  | if (!cxlm->enabled_cmds) { | 
|  | dev_err(dev, "No memory available for bitmap\n"); | 
|  | return ERR_PTR(-ENOMEM); | 
|  | } | 
|  |  | 
|  | return cxlm; | 
|  | } | 
|  |  | 
|  | static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, | 
|  | u8 bar, u64 offset) | 
|  | { | 
|  | struct pci_dev *pdev = cxlm->pdev; | 
|  | struct device *dev = &pdev->dev; | 
|  | void __iomem *addr; | 
|  |  | 
|  | /* Basic sanity check that BAR is big enough */ | 
|  | if (pci_resource_len(pdev, bar) < offset) { | 
|  | dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, | 
|  | &pdev->resource[bar], (unsigned long long)offset); | 
|  | return NULL; | 
|  | } | 
|  |  | 
|  | addr = pci_iomap(pdev, bar, 0); | 
|  | if (!addr) { | 
|  | dev_err(dev, "failed to map registers\n"); | 
|  | return addr; | 
|  | } | 
|  |  | 
|  | dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", | 
|  | bar, offset); | 
|  |  | 
|  | return addr; | 
|  | } | 
|  |  | 
|  | static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) | 
|  | { | 
|  | pci_iounmap(cxlm->pdev, base); | 
|  | } | 
|  |  | 
|  | static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) | 
|  | { | 
|  | int pos; | 
|  |  | 
|  | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); | 
|  | if (!pos) | 
|  | return 0; | 
|  |  | 
|  | while (pos) { | 
|  | u16 vendor, id; | 
|  |  | 
|  | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); | 
|  | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); | 
|  | if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) | 
|  | return pos; | 
|  |  | 
|  | pos = pci_find_next_ext_capability(pdev, pos, | 
|  | PCI_EXT_CAP_ID_DVSEC); | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, | 
|  | struct cxl_register_map *map) | 
|  | { | 
|  | struct pci_dev *pdev = cxlm->pdev; | 
|  | struct device *dev = &pdev->dev; | 
|  | struct cxl_component_reg_map *comp_map; | 
|  | struct cxl_device_reg_map *dev_map; | 
|  |  | 
|  | switch (map->reg_type) { | 
|  | case CXL_REGLOC_RBI_COMPONENT: | 
|  | comp_map = &map->component_map; | 
|  | cxl_probe_component_regs(dev, base, comp_map); | 
|  | if (!comp_map->hdm_decoder.valid) { | 
|  | dev_err(dev, "HDM decoder registers not found\n"); | 
|  | return -ENXIO; | 
|  | } | 
|  |  | 
|  | dev_dbg(dev, "Set up component registers\n"); | 
|  | break; | 
|  | case CXL_REGLOC_RBI_MEMDEV: | 
|  | dev_map = &map->device_map; | 
|  | cxl_probe_device_regs(dev, base, dev_map); | 
|  | if (!dev_map->status.valid || !dev_map->mbox.valid || | 
|  | !dev_map->memdev.valid) { | 
|  | dev_err(dev, "registers not found: %s%s%s\n", | 
|  | !dev_map->status.valid ? "status " : "", | 
|  | !dev_map->mbox.valid ? "mbox " : "", | 
|  | !dev_map->memdev.valid ? "memdev " : ""); | 
|  | return -ENXIO; | 
|  | } | 
|  |  | 
|  | dev_dbg(dev, "Probing device registers...\n"); | 
|  | break; | 
|  | default: | 
|  | break; | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) | 
|  | { | 
|  | struct pci_dev *pdev = cxlm->pdev; | 
|  | struct device *dev = &pdev->dev; | 
|  |  | 
|  | switch (map->reg_type) { | 
|  | case CXL_REGLOC_RBI_COMPONENT: | 
|  | cxl_map_component_regs(pdev, &cxlm->regs.component, map); | 
|  | dev_dbg(dev, "Mapping component registers...\n"); | 
|  | break; | 
|  | case CXL_REGLOC_RBI_MEMDEV: | 
|  | cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); | 
|  | dev_dbg(dev, "Probing device registers...\n"); | 
|  | break; | 
|  | default: | 
|  | break; | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, | 
|  | u8 *bar, u64 *offset, u8 *reg_type) | 
|  | { | 
|  | *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); | 
|  | *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); | 
|  | *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); | 
|  | } | 
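|  |  | 
|  | /* | 
|  | * Worked example, assuming the CXL 2.0 8.1.9 register locator layout encoded | 
|  | * by the CXL_REGLOC_* masks: reg_lo = 0x00010302 and reg_hi = 0 decode to | 
|  | * BAR 2, a 64K register block offset, and a CXL_REGLOC_RBI_MEMDEV block type. | 
|  | */ | 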
|  |  | 
|  | /** | 
|  | * cxl_mem_setup_regs() - Setup necessary MMIO. | 
|  | * @cxlm: The CXL memory device to communicate with. | 
|  | * | 
|  | * Return: 0 if all necessary registers mapped. | 
|  | * | 
|  | * A memory device is required by spec to implement a certain set of MMIO | 
|  | * regions. The purpose of this function is to enumerate and map those | 
|  | * registers. | 
|  | */ | 
|  | static int cxl_mem_setup_regs(struct cxl_mem *cxlm) | 
|  | { | 
|  | struct pci_dev *pdev = cxlm->pdev; | 
|  | struct device *dev = &pdev->dev; | 
|  | u32 regloc_size, regblocks; | 
|  | void __iomem *base; | 
|  | int regloc, i, n_maps; | 
|  | struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; | 
|  | int ret = 0; | 
|  |  | 
|  | regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); | 
|  | if (!regloc) { | 
|  | dev_err(dev, "register location dvsec not found\n"); | 
|  | return -ENXIO; | 
|  | } | 
|  |  | 
|  | if (pci_request_mem_regions(pdev, pci_name(pdev))) | 
|  | return -ENODEV; | 
|  |  | 
|  | /* Get the size of the Register Locator DVSEC */ | 
|  | pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size); | 
|  | regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); | 
|  |  | 
|  | regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; | 
|  | regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; | 
|  |  | 
|  | for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { | 
|  | u32 reg_lo, reg_hi; | 
|  | u8 reg_type; | 
|  | u64 offset; | 
|  | u8 bar; | 
|  |  | 
|  | pci_read_config_dword(pdev, regloc, ®_lo); | 
|  | pci_read_config_dword(pdev, regloc + 4, ®_hi); | 
|  |  | 
|  | cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, | 
|  | ®_type); | 
|  |  | 
|  | dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", | 
|  | bar, offset, reg_type); | 
|  |  | 
|  | /* Ignore unknown register block types */ | 
|  | if (reg_type > CXL_REGLOC_RBI_MEMDEV) | 
|  | continue; | 
|  |  | 
|  | base = cxl_mem_map_regblock(cxlm, bar, offset); | 
|  | if (!base) | 
|  | return -ENOMEM; | 
|  |  | 
|  | map = &maps[n_maps]; | 
|  | map->barno = bar; | 
|  | map->block_offset = offset; | 
|  | map->reg_type = reg_type; | 
|  |  | 
|  | ret = cxl_probe_regs(cxlm, base + offset, map); | 
|  |  | 
|  | /* Always unmap the regblock regardless of probe success */ | 
|  | cxl_mem_unmap_regblock(cxlm, base); | 
|  |  | 
|  | if (ret) | 
|  | return ret; | 
|  |  | 
|  | n_maps++; | 
|  | } | 
|  |  | 
|  | pci_release_mem_regions(pdev); | 
|  |  | 
|  | for (i = 0; i < n_maps; i++) { | 
|  | ret = cxl_map_regs(cxlm, &maps[i]); | 
|  | if (ret) | 
|  | break; | 
|  | } | 
|  |  | 
|  | return ret; | 
|  | } | 
|  |  | 
|  | static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) | 
|  | { | 
|  | u32 remaining = size; | 
|  | u32 offset = 0; | 
|  |  | 
|  | while (remaining) { | 
|  | u32 xfer_size = min_t(u32, remaining, cxlm->payload_size); | 
|  | struct cxl_mbox_get_log { | 
|  | uuid_t uuid; | 
|  | __le32 offset; | 
|  | __le32 length; | 
|  | } __packed log = { | 
|  | .uuid = *uuid, | 
|  | .offset = cpu_to_le32(offset), | 
|  | .length = cpu_to_le32(xfer_size) | 
|  | }; | 
|  | int rc; | 
|  |  | 
|  | rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log, | 
|  | sizeof(log), out, xfer_size); | 
|  | if (rc < 0) | 
|  | return rc; | 
|  |  | 
|  | out += xfer_size; | 
|  | remaining -= xfer_size; | 
|  | offset += xfer_size; | 
|  | } | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_walk_cel() - Walk through the Command Effects Log. | 
|  | * @cxlm: Device. | 
|  | * @size: Length of the Command Effects Log. | 
|  | * @cel: CEL | 
|  | * | 
|  | * Iterate over each entry in the CEL and determine if the driver supports the | 
|  | * command. If so, the command is enabled for the device and can be used later. | 
|  | */ | 
|  | static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel) | 
|  | { | 
|  | struct cel_entry { | 
|  | __le16 opcode; | 
|  | __le16 effect; | 
|  | } __packed * cel_entry; | 
|  | const int cel_entries = size / sizeof(*cel_entry); | 
|  | int i; | 
|  |  | 
|  | cel_entry = (struct cel_entry *)cel; | 
|  |  | 
|  | for (i = 0; i < cel_entries; i++) { | 
|  | u16 opcode = le16_to_cpu(cel_entry[i].opcode); | 
|  | struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); | 
|  |  | 
|  | if (!cmd) { | 
|  | dev_dbg(&cxlm->pdev->dev, | 
|  | "Opcode 0x%04x unsupported by driver", opcode); | 
|  | continue; | 
|  | } | 
|  |  | 
|  | set_bit(cmd->info.id, cxlm->enabled_cmds); | 
|  | } | 
|  | } | 
|  |  | 
|  | struct cxl_mbox_get_supported_logs { | 
|  | __le16 entries; | 
|  | u8 rsvd[6]; | 
|  | struct gsl_entry { | 
|  | uuid_t uuid; | 
|  | __le32 size; | 
|  | } __packed entry[]; | 
|  | } __packed; | 
|  |  | 
|  | static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) | 
|  | { | 
|  | struct cxl_mbox_get_supported_logs *ret; | 
|  | int rc; | 
|  |  | 
|  | ret = kvmalloc(cxlm->payload_size, GFP_KERNEL); | 
|  | if (!ret) | 
|  | return ERR_PTR(-ENOMEM); | 
|  |  | 
|  | rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, | 
|  | 0, ret, cxlm->payload_size); | 
|  | if (rc < 0) { | 
|  | kvfree(ret); | 
|  | return ERR_PTR(rc); | 
|  | } | 
|  |  | 
|  | return ret; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_get_partition_info - Get partition info | 
|  | * @cxlm: The device to act on | 
|  | * @active_volatile_bytes: returned active volatile capacity | 
|  | * @active_persistent_bytes: returned active persistent capacity | 
|  | * @next_volatile_bytes: returned next volatile capacity | 
|  | * @next_persistent_bytes: returned next persistent capacity | 
|  | * | 
|  | * Retrieve the current partition info for the device specified.  If not 0, the | 
|  | * 'next' values are pending and take effect on the next cold reset. | 
|  | * | 
|  | * Return: 0 if no error; otherwise the result of the mailbox command. | 
|  | * | 
|  | * See CXL 2.0 8.2.9.5.2.1 Get Partition Info | 
|  | */ | 
|  | static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, | 
|  | u64 *active_volatile_bytes, | 
|  | u64 *active_persistent_bytes, | 
|  | u64 *next_volatile_bytes, | 
|  | u64 *next_persistent_bytes) | 
|  | { | 
|  | struct cxl_mbox_get_partition_info { | 
|  | __le64 active_volatile_cap; | 
|  | __le64 active_persistent_cap; | 
|  | __le64 next_volatile_cap; | 
|  | __le64 next_persistent_cap; | 
|  | } __packed pi; | 
|  | int rc; | 
|  |  | 
|  | rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, | 
|  | NULL, 0, &pi, sizeof(pi)); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap); | 
|  | *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap); | 
|  | *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap); | 
|  | *next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap); | 
|  |  | 
|  | *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  | *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  | *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  | *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_enumerate_cmds() - Enumerate commands for a device. | 
|  | * @cxlm: The device. | 
|  | * | 
|  | * Return: 0 if enumeration completed successfully. | 
|  | * | 
|  | * CXL devices have optional support for certain commands. This function will | 
|  | * determine the set of supported commands for the hardware and update the | 
|  | * enabled_cmds bitmap in the @cxlm. | 
|  | */ | 
|  | static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm) | 
|  | { | 
|  | struct cxl_mbox_get_supported_logs *gsl; | 
|  | struct device *dev = &cxlm->pdev->dev; | 
|  | struct cxl_mem_command *cmd; | 
|  | int i, rc; | 
|  |  | 
|  | gsl = cxl_get_gsl(cxlm); | 
|  | if (IS_ERR(gsl)) | 
|  | return PTR_ERR(gsl); | 
|  |  | 
|  | rc = -ENOENT; | 
|  | for (i = 0; i < le16_to_cpu(gsl->entries); i++) { | 
|  | u32 size = le32_to_cpu(gsl->entry[i].size); | 
|  | uuid_t uuid = gsl->entry[i].uuid; | 
|  | u8 *log; | 
|  |  | 
|  | dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size); | 
|  |  | 
|  | if (!uuid_equal(&uuid, &log_uuid[CEL_UUID])) | 
|  | continue; | 
|  |  | 
|  | log = kvmalloc(size, GFP_KERNEL); | 
|  | if (!log) { | 
|  | rc = -ENOMEM; | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | rc = cxl_xfer_log(cxlm, &uuid, size, log); | 
|  | if (rc) { | 
|  | kvfree(log); | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | cxl_walk_cel(cxlm, size, log); | 
|  | kvfree(log); | 
|  |  | 
|  | /* In case CEL was bogus, enable some default commands. */ | 
|  | cxl_for_each_cmd(cmd) | 
|  | if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) | 
|  | set_bit(cmd->info.id, cxlm->enabled_cmds); | 
|  |  | 
|  | /* Found the required CEL */ | 
|  | rc = 0; | 
|  | } | 
|  |  | 
|  | out: | 
|  | kvfree(gsl); | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | /** | 
|  | * cxl_mem_identify() - Send the IDENTIFY command to the device. | 
|  | * @cxlm: The device to identify. | 
|  | * | 
|  | * Return: 0 if identify was executed successfully. | 
|  | * | 
|  | * This will dispatch the identify command to the device and on success populate | 
|  | * structures to be exported to sysfs. | 
|  | */ | 
|  | static int cxl_mem_identify(struct cxl_mem *cxlm) | 
|  | { | 
|  | /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ | 
|  | struct cxl_mbox_identify { | 
|  | char fw_revision[0x10]; | 
|  | __le64 total_capacity; | 
|  | __le64 volatile_capacity; | 
|  | __le64 persistent_capacity; | 
|  | __le64 partition_align; | 
|  | __le16 info_event_log_size; | 
|  | __le16 warning_event_log_size; | 
|  | __le16 failure_event_log_size; | 
|  | __le16 fatal_event_log_size; | 
|  | __le32 lsa_size; | 
|  | u8 poison_list_max_mer[3]; | 
|  | __le16 inject_poison_limit; | 
|  | u8 poison_caps; | 
|  | u8 qos_telemetry_caps; | 
|  | } __packed id; | 
|  | int rc; | 
|  |  | 
|  | rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, | 
|  | sizeof(id)); | 
|  | if (rc < 0) | 
|  | return rc; | 
|  |  | 
|  | cxlm->total_bytes = le64_to_cpu(id.total_capacity); | 
|  | cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  |  | 
|  | cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity); | 
|  | cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  |  | 
|  | cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity); | 
|  | cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  |  | 
|  | cxlm->partition_align_bytes = le64_to_cpu(id.partition_align); | 
|  | cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER; | 
|  |  | 
|  | dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n" | 
|  | "     total_bytes = %#llx\n" | 
|  | "     volatile_only_bytes = %#llx\n" | 
|  | "     persistent_only_bytes = %#llx\n" | 
|  | "     partition_align_bytes = %#llx\n", | 
|  | cxlm->total_bytes, | 
|  | cxlm->volatile_only_bytes, | 
|  | cxlm->persistent_only_bytes, | 
|  | cxlm->partition_align_bytes); | 
|  |  | 
|  | cxlm->lsa_size = le32_to_cpu(id.lsa_size); | 
|  | memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_mem_create_range_info(struct cxl_mem *cxlm) | 
|  | { | 
|  | int rc; | 
|  |  | 
|  | if (cxlm->partition_align_bytes == 0) { | 
|  | cxlm->ram_range.start = 0; | 
|  | cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; | 
|  | cxlm->pmem_range.start = cxlm->volatile_only_bytes; | 
|  | cxlm->pmem_range.end = cxlm->volatile_only_bytes + | 
|  | cxlm->persistent_only_bytes - 1; | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | rc = cxl_mem_get_partition_info(cxlm, | 
|  | &cxlm->active_volatile_bytes, | 
|  | &cxlm->active_persistent_bytes, | 
|  | &cxlm->next_volatile_bytes, | 
|  | &cxlm->next_persistent_bytes); | 
|  | if (rc < 0) { | 
|  | dev_err(&cxlm->pdev->dev, "Failed to query partition information\n"); | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n" | 
|  | "     active_volatile_bytes = %#llx\n" | 
|  | "     active_persistent_bytes = %#llx\n" | 
|  | "     next_volatile_bytes = %#llx\n" | 
|  | "     next_persistent_bytes = %#llx\n", | 
|  | cxlm->active_volatile_bytes, | 
|  | cxlm->active_persistent_bytes, | 
|  | cxlm->next_volatile_bytes, | 
|  | cxlm->next_persistent_bytes); | 
|  |  | 
|  | cxlm->ram_range.start = 0; | 
|  | cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; | 
|  |  | 
|  | cxlm->pmem_range.start = cxlm->active_volatile_bytes; | 
|  | cxlm->pmem_range.end = cxlm->active_volatile_bytes + | 
|  | cxlm->active_persistent_bytes - 1; | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 
|  | { | 
|  | struct cxl_memdev *cxlmd; | 
|  | struct cxl_mem *cxlm; | 
|  | int rc; | 
|  |  | 
|  | rc = pcim_enable_device(pdev); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | cxlm = cxl_mem_create(pdev); | 
|  | if (IS_ERR(cxlm)) | 
|  | return PTR_ERR(cxlm); | 
|  |  | 
|  | rc = cxl_mem_setup_regs(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | rc = cxl_mem_setup_mailbox(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | rc = cxl_mem_enumerate_cmds(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | rc = cxl_mem_identify(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | rc = cxl_mem_create_range_info(cxlm); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); | 
|  | if (IS_ERR(cxlmd)) | 
|  | return PTR_ERR(cxlmd); | 
|  |  | 
|  | if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM)) | 
|  | rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd); | 
|  |  | 
|  | return rc; | 
|  | } | 
|  |  | 
|  | static const struct pci_device_id cxl_mem_pci_tbl[] = { | 
|  | /* PCI class code for CXL.mem Type-3 Devices */ | 
|  | { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)}, | 
|  | { /* terminate list */ }, | 
|  | }; | 
|  | MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl); | 
|  |  | 
|  | static struct pci_driver cxl_mem_driver = { | 
|  | .name			= KBUILD_MODNAME, | 
|  | .id_table		= cxl_mem_pci_tbl, | 
|  | .probe			= cxl_mem_probe, | 
|  | .driver	= { | 
|  | .probe_type	= PROBE_PREFER_ASYNCHRONOUS, | 
|  | }, | 
|  | }; | 
|  |  | 
|  | static __init int cxl_mem_init(void) | 
|  | { | 
|  | struct dentry *mbox_debugfs; | 
|  | int rc; | 
|  |  | 
|  | /* Double check the anonymous union trickery in struct cxl_regs */ | 
|  | BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != | 
|  | offsetof(struct cxl_regs, device_regs.memdev)); | 
|  |  | 
|  | rc = pci_register_driver(&cxl_mem_driver); | 
|  | if (rc) | 
|  | return rc; | 
|  |  | 
|  | cxl_debugfs = debugfs_create_dir("cxl", NULL); | 
|  | mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); | 
|  | debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, | 
|  | &cxl_raw_allow_all); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static __exit void cxl_mem_exit(void) | 
|  | { | 
|  | debugfs_remove_recursive(cxl_debugfs); | 
|  | pci_unregister_driver(&cxl_mem_driver); | 
|  | } | 
|  |  | 
|  | MODULE_LICENSE("GPL v2"); | 
|  | module_init(cxl_mem_init); | 
|  | module_exit(cxl_mem_exit); | 
|  | MODULE_IMPORT_NS(CXL); |