drivers/nvme: Add command infrastructure

Based on FreeBSD's implementation by James Harris, Copyright (c) 2012-2016 Intel Corporation. This is the cornerstone of the whole NVMe logic: sending commands and getting replies back, all through memory shared between the host and the controller. It is then used to initialize the admin/IO queues and to identify the controller.

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>

parent b7d4d74e03
commit 7499fae5cd

7 changed files with 1231 additions and 15 deletions
@@ -1,4 +1,4 @@
 # Copyright (c) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-zephyr_library_sources(nvme_controller.c)
+zephyr_library_sources(nvme_controller.c nvme_cmd.c nvme_controller_cmd.c)
@@ -37,6 +37,21 @@ config NVME_IO_ENTRIES
	  This sets the amount of allocated IO queue entries.
	  Do not touch this unless you know what you are doing.

config NVME_RETRY_COUNT
	int "Retry count"
	default 2
	help
	  This sets the number of possible retries per request.
	  Do not touch this unless you know what you are doing.

config NVME_REQUEST_TIMEOUT
	int "Timeout period for NVMe request"
	range 5 120
	default 5
	help
	  This sets the time, in seconds, to wait for a request to succeed.
	  Do not touch this unless you know what you are doing.

config NVME_INT_PRIORITY
	int "Interrupt priority"
	default 2
@@ -423,6 +423,9 @@ struct nvme_controller {

	msi_vector_t vectors[NVME_PCIE_MSIX_VECTORS];

	struct nvme_controller_data cdata;

	uint32_t num_io_queues;
	struct nvme_cmd_qpair *adminq;
	struct nvme_cmd_qpair *ioq;

drivers/disk/nvme/nvme_cmd.c (new file, 411 lines)
@@ -0,0 +1,411 @@
/*
 * SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2022 Intel Corp.
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(nvme, CONFIG_NVME_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/sys/byteorder.h>

#include <string.h>

#include "nvme.h"
#include "nvme_helpers.h"

static struct nvme_request request_pool[NVME_REQUEST_AMOUNT];
static sys_dlist_t free_request;
static sys_dlist_t pending_request;

static void request_timeout(struct k_work *work);

static K_WORK_DELAYABLE_DEFINE(request_timer, request_timeout);

void nvme_cmd_init(void)
{
	int idx;

	sys_dlist_init(&free_request);
	sys_dlist_init(&pending_request);

	for (idx = 0; idx < NVME_REQUEST_AMOUNT; idx++) {
		sys_dlist_append(&free_request, &request_pool[idx].node);
	}
}

void nvme_cmd_request_free(struct nvme_request *request)
{
	if (sys_dnode_is_linked(&request->node)) {
		sys_dlist_remove(&request->node);
	}

	memset(request, 0, sizeof(struct nvme_request));
	sys_dlist_append(&free_request, &request->node);
}

struct nvme_request *nvme_cmd_request_alloc(void)
{
	sys_dnode_t *node;

	node = sys_dlist_peek_head(&free_request);
	if (!node) {
		LOG_ERR("Could not allocate request");
		return NULL;
	}

	sys_dlist_remove(node);

	return CONTAINER_OF(node, struct nvme_request, node);
}

static void nvme_cmd_register_request(struct nvme_request *request)
{
	sys_dlist_append(&pending_request, &request->node);

	request->req_start = k_uptime_get_32();

	if (!k_work_delayable_remaining_get(&request_timer)) {
		k_work_reschedule(&request_timer,
				  K_SECONDS(CONFIG_NVME_REQUEST_TIMEOUT));
	}
}

static void request_timeout(struct k_work *work)
{
	uint32_t current = k_uptime_get_32();
	struct nvme_request *request, *next;

	ARG_UNUSED(work);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&pending_request,
					  request, next, node) {
		/* req_start is in milliseconds, the timeout in seconds */
		if ((int32_t)(request->req_start +
			      CONFIG_NVME_REQUEST_TIMEOUT * MSEC_PER_SEC -
			      current) > 0) {
			break;
		}

		LOG_WRN("Request %p CID %u timed-out",
			request, request->cmd.cdw0.cid);

		/* ToDo:
		 * - check CSTS for fatal fault
		 * - reset hw otherwise if it's the case
		 * - or check completion for missed interruption
		 */

		if (request->cb_fn) {
			request->cb_fn(request->cb_arg, NULL);
		}

		nvme_cmd_request_free(request);
	}

	if (request) {
		k_work_reschedule(&request_timer,
				  K_MSEC(request->req_start +
					 CONFIG_NVME_REQUEST_TIMEOUT * MSEC_PER_SEC -
					 current));
	}
}
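The `(int32_t)(deadline - now) > 0` test above is the classic wrap-safe way to compare 32-bit millisecond timestamps. A small self-contained sketch (illustration only, not part of the driver) showing why a plain comparison breaks near the 32-bit wrap point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 0xFFFFFFF0U;      /* uptime just before the wrap */
	uint32_t deadline = now + 5000U; /* 5s later, wraps to 0x00001388 */

	/* Plain comparison is fooled by the wrap: deadline < now here. */
	printf("naive: expired=%d\n", deadline < now);                     /* 1 (wrong) */

	/* Signed difference stays correct for spans below 2^31 ms. */
	printf("wrap-safe: expired=%d\n", (int32_t)(deadline - now) <= 0); /* 0 (right) */
	return 0;
}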
static bool nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);

	/*
	 * TODO: spec is not clear how commands that are aborted due
	 * to TLER will be marked. So for now, it seems
	 * NAMESPACE_NOT_READY is the only case where we should
	 * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
	 * set the DNR bit correctly since the driver controls that.
	 */
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr) {
				return false;
			}

			return true;
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return false;
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return false;
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr) {
				return false;
			}

			return true;
		default:
			return false;
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return false;
	}
}

static void nvme_cmd_request_complete(struct nvme_request *request,
				      struct nvme_completion *cpl)
{
	bool error, retriable, retry;

	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable &&
		request->retries < CONFIG_NVME_RETRY_COUNT;

	if (retry) {
		LOG_DBG("CMD will be retried");
		request->qpair->num_retries++;
	}

	if (error &&
	    (!retriable || (request->retries >= CONFIG_NVME_RETRY_COUNT))) {
		LOG_DBG("CMD error");
		request->qpair->num_failures++;
	}

	if (cpl->cid != request->cmd.cdw0.cid) {
		LOG_ERR("cpl cid != cmd cid");
	}

	if (retry) {
		LOG_DBG("Retrying CMD");
		/* Let's remove it from pending... */
		sys_dlist_remove(&request->node);
		/* ...and re-submit, thus re-adding to pending */
		nvme_cmd_qpair_submit_request(request->qpair, request);
		request->retries++;
	} else {
		LOG_DBG("Request %p CMD complete on %p/%p",
			request, request->cb_fn, request->cb_arg);

		if (request->cb_fn) {
			request->cb_fn(request->cb_arg, cpl);
		}

		nvme_cmd_request_free(request);
	}
}

static void nvme_cmd_qpair_process_completion(struct nvme_cmd_qpair *qpair)
{
	struct nvme_request *request;
	struct nvme_completion cpl;
	int done = 0;

	if (qpair->num_intr_handler_calls == 0 && qpair->phase == 0) {
		LOG_WRN("Phase wrong for first interrupt call.");
	}

	qpair->num_intr_handler_calls++;

	while (1) {
		uint16_t status;

		status = sys_le16_to_cpu(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase) {
			break;
		}

		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		if (NVME_STATUS_GET_P(status) != NVME_STATUS_GET_P(cpl.status)) {
			LOG_WRN("Phase unexpectedly inconsistent");
		}

		if (cpl.cid < NVME_REQUEST_AMOUNT) {
			request = &request_pool[cpl.cid];
		} else {
			request = NULL;
		}

		done++;
		if (request != NULL) {
			nvme_cmd_request_complete(request, &cpl);
			qpair->sq_head = cpl.sqhd;
		} else {
			LOG_ERR("cpl (cid = %u) does not map to cmd", cpl.cid);
		}

		qpair->cq_head++;
		if (qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}
	}

	if (done != 0) {
		mm_reg_t regs = DEVICE_MMIO_GET(qpair->ctrlr->dev);

		sys_write32(qpair->cq_head, regs + qpair->cq_hdbl_off);
	}
}

static void nvme_cmd_qpair_msi_handler(const void *arg)
{
	const struct nvme_cmd_qpair *qpair = arg;

	nvme_cmd_qpair_process_completion((struct nvme_cmd_qpair *)qpair);
}

int nvme_cmd_qpair_setup(struct nvme_cmd_qpair *qpair,
			 struct nvme_controller *ctrlr,
			 uint32_t id)
{
	const struct nvme_controller_config *nvme_ctrlr_cfg =
		ctrlr->dev->config;

	qpair->ctrlr = ctrlr;
	qpair->id = id;
	qpair->vector = qpair->id;

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;

	qpair->cmd_bus_addr = (uintptr_t)qpair->cmd;
	qpair->cpl_bus_addr = (uintptr_t)qpair->cpl;

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell) +
		(qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell) +
		(qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);

	if (!pcie_msi_vector_connect(nvme_ctrlr_cfg->pcie->bdf,
				     &ctrlr->vectors[qpair->vector],
				     nvme_cmd_qpair_msi_handler, qpair, 0)) {
		LOG_ERR("Failed to connect MSI-X vector %u", qpair->id);
		return -EIO;
	}

	LOG_DBG("CMD Qpair created ID %u, %u entries - cmd/cpl addr "
		"0x%lx/0x%lx - sq/cq offsets %u/%u",
		qpair->id, qpair->num_entries, qpair->cmd_bus_addr,
		qpair->cpl_bus_addr, qpair->sq_tdbl_off, qpair->cq_hdbl_off);

	return 0;
}
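The doorbell shifts above give the spec-mandated byte offsets once you know that, in the FreeBSD driver this is ported from, ctrlr->dstrd stores CAP.DSTRD + 2 so that 1 << dstrd is the doorbell stride in bytes (an assumption here, inferred from that ancestry). A worked example for queue pair 1 with CAP.DSTRD = 0 (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t doorbell_base = 0x1000; /* first doorbell register per NVMe spec */
	uint32_t dstrd = 0 + 2;          /* CAP.DSTRD = 0 -> 4-byte stride */
	uint32_t id = 1;                 /* first IO queue pair */

	uint32_t sq_tdbl = doorbell_base + (id << (dstrd + 1));
	uint32_t cq_hdbl = doorbell_base + (id << (dstrd + 1)) + (1 << dstrd);

	/* SQ1 tail doorbell at 0x1008, CQ1 head doorbell at 0x100C,
	 * matching the spec layout of alternating SQ/CQ doorbells.
	 */
	printf("SQ tail 0x%X, CQ head 0x%X\n", sq_tdbl, cq_hdbl);
	return 0;
}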
void nvme_cmd_qpair_reset(struct nvme_cmd_qpair *qpair)
{
	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set phase
	 * bit on completions to 1. So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue
	 * rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	       qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	       qpair->num_entries * sizeof(struct nvme_completion));
}
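The phase tag is what lets the host tell fresh completions from stale ones without any extra flag. A minimal sketch of that handshake, assuming P occupies bit 0 of the merged status word (which is what NVME_STATUS_GET_P extracts); illustration only, not driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* P is bit 0 of the 16-bit status word in this commit's layout. */
static bool cq_entry_is_fresh(uint16_t status, uint32_t expected_phase)
{
	return (uint32_t)(status & 0x1) == expected_phase;
}

int main(void)
{
	/* Pass 1: the host expects P=1; a zeroed (stale) slot is ignored. */
	printf("%d\n", cq_entry_is_fresh(0x0001, 1)); /* 1: new completion */
	printf("%d\n", cq_entry_is_fresh(0x0000, 1)); /* 0: stale slot */

	/* After cq_head wraps to 0, the expectation flips to P=0,
	 * so leftover pass-1 entries (P=1) are no longer consumed.
	 */
	printf("%d\n", cq_entry_is_fresh(0x0001, 0)); /* 0: leftover pass-1 */
	return 0;
}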
static int nvme_cmd_qpair_fill_dptr(struct nvme_cmd_qpair *qpair,
				    struct nvme_request *request)
{
	switch (request->type) {
	case NVME_REQUEST_NULL:
		break;
	case NVME_REQUEST_VADDR:
		if (request->payload_size > qpair->ctrlr->max_xfer_size) {
			LOG_ERR("VADDR request's payload too big");
			return -EINVAL;
		}

		request->cmd.dptr.prp1 =
			sys_cpu_to_le64((uintptr_t)request->payload);
		request->cmd.dptr.prp2 = 0;

		/* ToDo: handle > page_size payload through prp list */
		break;
	default:
		break;
	}

	return 0;
}

int nvme_cmd_qpair_submit_request(struct nvme_cmd_qpair *qpair,
				  struct nvme_request *request)
{
	mm_reg_t regs = DEVICE_MMIO_GET(qpair->ctrlr->dev);
	int ret;

	request->qpair = qpair;

	/* The CID is the request's index in the request pool */
	request->cmd.cdw0.cid = sys_cpu_to_le16((uint16_t)(request -
							   request_pool));

	ret = nvme_cmd_qpair_fill_dptr(qpair, request);
	if (ret != 0) {
		nvme_cmd_request_free(request);
		return ret;
	}

	nvme_cmd_register_request(request);

	memcpy(&qpair->cmd[qpair->sq_tail],
	       &request->cmd, sizeof(request->cmd));

	qpair->sq_tail++;
	if (qpair->sq_tail == qpair->num_entries) {
		qpair->sq_tail = 0;
	}

	sys_write32(qpair->sq_tail, regs + qpair->sq_tdbl_off);
	qpair->num_cmds++;

	LOG_DBG("Request %p %llu submitted: CID %u - sq_tail %u",
		request, qpair->num_cmds, request->cmd.cdw0.cid,
		qpair->sq_tail - 1);
	return 0;
}

void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	if (cpl != NULL) {
		memcpy(&status->cpl, cpl, sizeof(*cpl));
	} else {
		status->status = -ETIMEDOUT;
	}

	k_sem_give(&status->sem);
}
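Putting the pieces together, a consumer allocates a request, submits it on a queue pair, and blocks on the poll status until the callback fires. A minimal usage sketch of that pattern, assuming an already initialized struct nvme_controller (the function name and the choice of the volatile-write-cache feature are illustrative, not part of the commit):

static int example_get_write_cache_feature(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int ret;

	nvme_cpl_status_poll_init(&status);

	/* The callback gives the semaphore once the completion arrives,
	 * or signals a timeout through a NULL cpl.
	 */
	ret = nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_VOLATILE_WRITE_CACHE,
					 0, NULL, 0,
					 nvme_completion_poll_cb, &status);
	if (ret != 0) {
		return ret;
	}

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		return -EIO;
	}

	/* Per the NVMe spec, cdw0 bit 0 reports whether the volatile
	 * write cache is enabled.
	 */
	return status.cpl.cdw0 & 0x1;
}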
@@ -66,8 +66,443 @@ struct nvme_completion {

	/* dword 3 */
	uint16_t cid;	/* command identifier */
-	uint16_t p : 1;	/* phase tag */
-	uint16_t status : 15;
+	uint16_t status;	/* phase tag is bit 0 */
} __aligned(8);

struct nvme_completion_poll_status {
	int status;
	struct nvme_completion cpl;
	struct k_sem sem;
};

/* status code types */
enum nvme_status_code_type {
	NVME_SCT_GENERIC		= 0x0,
	NVME_SCT_COMMAND_SPECIFIC	= 0x1,
	NVME_SCT_MEDIA_ERROR		= 0x2,
	NVME_SCT_PATH_RELATED		= 0x3,
	/* 0x4-0x6 - reserved */
	NVME_SCT_VENDOR_SPECIFIC	= 0x7,
};

/* generic command status codes */
enum nvme_generic_command_status_code {
	NVME_SC_SUCCESS				= 0x00,
	NVME_SC_INVALID_OPCODE			= 0x01,
	NVME_SC_INVALID_FIELD			= 0x02,
	NVME_SC_COMMAND_ID_CONFLICT		= 0x03,
	NVME_SC_DATA_TRANSFER_ERROR		= 0x04,
	NVME_SC_ABORTED_POWER_LOSS		= 0x05,
	NVME_SC_INTERNAL_DEVICE_ERROR		= 0x06,
	NVME_SC_ABORTED_BY_REQUEST		= 0x07,
	NVME_SC_ABORTED_SQ_DELETION		= 0x08,
	NVME_SC_ABORTED_FAILED_FUSED		= 0x09,
	NVME_SC_ABORTED_MISSING_FUSED		= 0x0a,
	NVME_SC_INVALID_NAMESPACE_OR_FORMAT	= 0x0b,
	NVME_SC_COMMAND_SEQUENCE_ERROR		= 0x0c,
	NVME_SC_INVALID_SGL_SEGMENT_DESCR	= 0x0d,
	NVME_SC_INVALID_NUMBER_OF_SGL_DESCR	= 0x0e,
	NVME_SC_DATA_SGL_LENGTH_INVALID		= 0x0f,
	NVME_SC_METADATA_SGL_LENGTH_INVALID	= 0x10,
	NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID	= 0x11,
	NVME_SC_INVALID_USE_OF_CMB		= 0x12,
	NVME_SC_PRP_OFFSET_INVALID		= 0x13,
	NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED	= 0x14,
	NVME_SC_OPERATION_DENIED		= 0x15,
	NVME_SC_SGL_OFFSET_INVALID		= 0x16,
	/* 0x17 - reserved */
	NVME_SC_HOST_ID_INCONSISTENT_FORMAT	= 0x18,
	NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED	= 0x19,
	NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID	= 0x1a,
	NVME_SC_ABORTED_DUE_TO_PREEMPT		= 0x1b,
	NVME_SC_SANITIZE_FAILED			= 0x1c,
	NVME_SC_SANITIZE_IN_PROGRESS		= 0x1d,
	NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID	= 0x1e,
	NVME_SC_NOT_SUPPORTED_IN_CMB		= 0x1f,
	NVME_SC_NAMESPACE_IS_WRITE_PROTECTED	= 0x20,
	NVME_SC_COMMAND_INTERRUPTED		= 0x21,
	NVME_SC_TRANSIENT_TRANSPORT_ERROR	= 0x22,

	NVME_SC_LBA_OUT_OF_RANGE		= 0x80,
	NVME_SC_CAPACITY_EXCEEDED		= 0x81,
	NVME_SC_NAMESPACE_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT		= 0x83,
	NVME_SC_FORMAT_IN_PROGRESS		= 0x84,
};

/* command specific status codes */
enum nvme_command_specific_status_code {
	NVME_SC_COMPLETION_QUEUE_INVALID	= 0x00,
	NVME_SC_INVALID_QUEUE_IDENTIFIER	= 0x01,
	NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED	= 0x02,
	NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED	= 0x03,
	/* 0x04 - reserved */
	NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
	NVME_SC_INVALID_FIRMWARE_SLOT		= 0x06,
	NVME_SC_INVALID_FIRMWARE_IMAGE		= 0x07,
	NVME_SC_INVALID_INTERRUPT_VECTOR	= 0x08,
	NVME_SC_INVALID_LOG_PAGE		= 0x09,
	NVME_SC_INVALID_FORMAT			= 0x0a,
	NVME_SC_FIRMWARE_REQUIRES_RESET		= 0x0b,
	NVME_SC_INVALID_QUEUE_DELETION		= 0x0c,
	NVME_SC_FEATURE_NOT_SAVEABLE		= 0x0d,
	NVME_SC_FEATURE_NOT_CHANGEABLE		= 0x0e,
	NVME_SC_FEATURE_NOT_NS_SPECIFIC		= 0x0f,
	NVME_SC_FW_ACT_REQUIRES_NVMS_RESET	= 0x10,
	NVME_SC_FW_ACT_REQUIRES_RESET		= 0x11,
	NVME_SC_FW_ACT_REQUIRES_TIME		= 0x12,
	NVME_SC_FW_ACT_PROHIBITED		= 0x13,
	NVME_SC_OVERLAPPING_RANGE		= 0x14,
	NVME_SC_NS_INSUFFICIENT_CAPACITY	= 0x15,
	NVME_SC_NS_ID_UNAVAILABLE		= 0x16,
	/* 0x17 - reserved */
	NVME_SC_NS_ALREADY_ATTACHED		= 0x18,
	NVME_SC_NS_IS_PRIVATE			= 0x19,
	NVME_SC_NS_NOT_ATTACHED			= 0x1a,
	NVME_SC_THIN_PROV_NOT_SUPPORTED		= 0x1b,
	NVME_SC_CTRLR_LIST_INVALID		= 0x1c,
	NVME_SC_SELF_TEST_IN_PROGRESS		= 0x1d,
	NVME_SC_BOOT_PART_WRITE_PROHIB		= 0x1e,
	NVME_SC_INVALID_CTRLR_ID		= 0x1f,
	NVME_SC_INVALID_SEC_CTRLR_STATE		= 0x20,
	NVME_SC_INVALID_NUM_OF_CTRLR_RESRC	= 0x21,
	NVME_SC_INVALID_RESOURCE_ID		= 0x22,
	NVME_SC_SANITIZE_PROHIBITED_WPMRE	= 0x23,
	NVME_SC_ANA_GROUP_ID_INVALID		= 0x24,
	NVME_SC_ANA_ATTACH_FAILED		= 0x25,

	NVME_SC_CONFLICTING_ATTRIBUTES		= 0x80,
	NVME_SC_INVALID_PROTECTION_INFO		= 0x81,
	NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE	= 0x82,
};

/* media error status codes */
enum nvme_media_error_status_code {
	NVME_SC_WRITE_FAULTS			= 0x80,
	NVME_SC_UNRECOVERED_READ_ERROR		= 0x81,
	NVME_SC_GUARD_CHECK_ERROR		= 0x82,
	NVME_SC_APPLICATION_TAG_CHECK_ERROR	= 0x83,
	NVME_SC_REFERENCE_TAG_CHECK_ERROR	= 0x84,
	NVME_SC_COMPARE_FAILURE			= 0x85,
	NVME_SC_ACCESS_DENIED			= 0x86,
	NVME_SC_DEALLOCATED_OR_UNWRITTEN	= 0x87,
};

/* path related status codes */
enum nvme_path_related_status_code {
	NVME_SC_INTERNAL_PATH_ERROR		= 0x00,
	NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS = 0x01,
	NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE	= 0x02,
	NVME_SC_ASYMMETRIC_ACCESS_TRANSITION	= 0x03,
	NVME_SC_CONTROLLER_PATHING_ERROR	= 0x60,
	NVME_SC_HOST_PATHING_ERROR		= 0x70,
	NVME_SC_COMMAND_ABORTED_BY_HOST		= 0x71,
};

/* admin opcodes */
enum nvme_admin_opcode {
	NVME_OPC_DELETE_IO_SQ			= 0x00,
	NVME_OPC_CREATE_IO_SQ			= 0x01,
	NVME_OPC_GET_LOG_PAGE			= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_DELETE_IO_CQ			= 0x04,
	NVME_OPC_CREATE_IO_CQ			= 0x05,
	NVME_OPC_IDENTIFY			= 0x06,
	/* 0x07 - reserved */
	NVME_OPC_ABORT				= 0x08,
	NVME_OPC_SET_FEATURES			= 0x09,
	NVME_OPC_GET_FEATURES			= 0x0a,
	/* 0x0b - reserved */
	NVME_OPC_ASYNC_EVENT_REQUEST		= 0x0c,
	NVME_OPC_NAMESPACE_MANAGEMENT		= 0x0d,
	/* 0x0e-0x0f - reserved */
	NVME_OPC_FIRMWARE_ACTIVATE		= 0x10,
	NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD	= 0x11,
	/* 0x12-0x13 - reserved */
	NVME_OPC_DEVICE_SELF_TEST		= 0x14,
	NVME_OPC_NAMESPACE_ATTACHMENT		= 0x15,
	/* 0x16-0x17 - reserved */
	NVME_OPC_KEEP_ALIVE			= 0x18,
	NVME_OPC_DIRECTIVE_SEND			= 0x19,
	NVME_OPC_DIRECTIVE_RECEIVE		= 0x1a,
	/* 0x1b - reserved */
	NVME_OPC_VIRTUALIZATION_MANAGEMENT	= 0x1c,
	NVME_OPC_NVME_MI_SEND			= 0x1d,
	NVME_OPC_NVME_MI_RECEIVE		= 0x1e,
	/* 0x1f-0x7b - reserved */
	NVME_OPC_DOORBELL_BUFFER_CONFIG		= 0x7c,

	NVME_OPC_FORMAT_NVM			= 0x80,
	NVME_OPC_SECURITY_SEND			= 0x81,
	NVME_OPC_SECURITY_RECEIVE		= 0x82,
	/* 0x83 - reserved */
	NVME_OPC_SANITIZE			= 0x84,
	/* 0x85 - reserved */
	NVME_OPC_GET_LBA_STATUS			= 0x86,
};

/* nvme nvm opcodes */
enum nvme_nvm_opcode {
	NVME_OPC_FLUSH				= 0x00,
	NVME_OPC_WRITE				= 0x01,
	NVME_OPC_READ				= 0x02,
	/* 0x03 - reserved */
	NVME_OPC_WRITE_UNCORRECTABLE		= 0x04,
	NVME_OPC_COMPARE			= 0x05,
	/* 0x06-0x07 - reserved */
	NVME_OPC_WRITE_ZEROES			= 0x08,
	NVME_OPC_DATASET_MANAGEMENT		= 0x09,
	/* 0x0a-0x0b - reserved */
	NVME_OPC_VERIFY				= 0x0c,
	NVME_OPC_RESERVATION_REGISTER		= 0x0d,
	NVME_OPC_RESERVATION_REPORT		= 0x0e,
	/* 0x0f-0x10 - reserved */
	NVME_OPC_RESERVATION_ACQUIRE		= 0x11,
	/* 0x12-0x14 - reserved */
	NVME_OPC_RESERVATION_RELEASE		= 0x15,
};

enum nvme_feature {
	/* 0x00 - reserved */
	NVME_FEAT_ARBITRATION			= 0x01,
	NVME_FEAT_POWER_MANAGEMENT		= 0x02,
	NVME_FEAT_LBA_RANGE_TYPE		= 0x03,
	NVME_FEAT_TEMPERATURE_THRESHOLD		= 0x04,
	NVME_FEAT_ERROR_RECOVERY		= 0x05,
	NVME_FEAT_VOLATILE_WRITE_CACHE		= 0x06,
	NVME_FEAT_NUMBER_OF_QUEUES		= 0x07,
	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
	NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C,
	NVME_FEAT_HOST_MEMORY_BUFFER		= 0x0D,
	NVME_FEAT_TIMESTAMP			= 0x0E,
	NVME_FEAT_KEEP_ALIVE_TIMER		= 0x0F,
	NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT	= 0x10,
	NVME_FEAT_NON_OP_POWER_STATE_CONFIG	= 0x11,
	NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG	= 0x12,
	NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG = 0x13,
	NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW = 0x14,
	NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES = 0x15,
	NVME_FEAT_HOST_BEHAVIOR_SUPPORT		= 0x16,
	NVME_FEAT_SANITIZE_CONFIG		= 0x17,
	NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION = 0x18,
	/* 0x19-0x77 - reserved */
	/* 0x78-0x7f - NVMe Management Interface */
	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
	NVME_FEAT_HOST_IDENTIFIER		= 0x81,
	NVME_FEAT_RESERVATION_NOTIFICATION_MASK	= 0x82,
	NVME_FEAT_RESERVATION_PERSISTENCE	= 0x83,
	NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG = 0x84,
	/* 0x85-0xBF - command set specific (reserved) */
	/* 0xC0-0xFF - vendor specific */
};

#if !defined(CONFIG_DCACHE_LINE_SIZE) || (CONFIG_DCACHE_LINE_SIZE == 0)
#define CACHE_LINE_SIZE	(64)
#else
#define CACHE_LINE_SIZE	CONFIG_DCACHE_LINE_SIZE
#endif

struct nvme_cmd_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;

	uint32_t		num_entries;

	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	uintptr_t		cmd_bus_addr;
	uintptr_t		cpl_bus_addr;

	uint16_t		vector;
} __aligned(CACHE_LINE_SIZE);

typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);

enum nvme_request_type {
	NVME_REQUEST_NULL	= 1,
	NVME_REQUEST_VADDR	= 2,
};

struct nvme_request {
	struct nvme_command	cmd;
	struct nvme_cmd_qpair	*qpair;

	uint32_t		type;
	uint32_t		req_start;
	int32_t			retries;

	void			*payload;
	uint32_t		payload_size;
	nvme_cb_fn_t		cb_fn;
	void			*cb_arg;

	sys_dnode_t		node;
};

void nvme_cmd_init(void);

void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

void nvme_cmd_request_free(struct nvme_request *request);

struct nvme_request *nvme_cmd_request_alloc(void);

int nvme_cmd_qpair_setup(struct nvme_cmd_qpair *qpair,
			 struct nvme_controller *ctrlr,
			 uint32_t id);

void nvme_cmd_qpair_reset(struct nvme_cmd_qpair *qpair);

int nvme_cmd_qpair_submit_request(struct nvme_cmd_qpair *qpair,
				  struct nvme_request *request);

int nvme_cmd_identify_controller(struct nvme_controller *ctrlr,
				 void *payload,
				 nvme_cb_fn_t cb_fn,
				 void *cb_arg);

int nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
				       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       uint32_t cdw12, uint32_t cdw13,
			       uint32_t cdw14, uint32_t cdw15,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg);

int nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				  uint32_t num_queues,
				  nvme_cb_fn_t cb_fn, void *cb_arg);

static inline
struct nvme_request *nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_cmd_request_alloc();
	if (request != NULL) {
		request->cb_fn = cb_fn;
		request->cb_arg = cb_arg;
	}

	return request;
}

static inline
struct nvme_request *nvme_allocate_request_vaddr(void *payload,
						 uint32_t payload_size,
						 nvme_cb_fn_t cb_fn,
						 void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_allocate_request(cb_fn, cb_arg);
	if (request != NULL) {
		request->type = NVME_REQUEST_VADDR;
		request->payload = payload;
		request->payload_size = payload_size;
	}

	return request;
}

static inline
struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn,
						void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_allocate_request(cb_fn, cb_arg);
	if (request != NULL) {
		request->type = NVME_REQUEST_NULL;
	}

	return request;
}

static inline void nvme_completion_swapbytes(struct nvme_completion *cpl)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
	cpl->cdw0 = sys_le32_to_cpu(cpl->cdw0);
	/* omit rsvd1 */
	cpl->sqhd = sys_le16_to_cpu(cpl->sqhd);
	cpl->sqid = sys_le16_to_cpu(cpl->sqid);
	/* omit cid */
	cpl->status = sys_le16_to_cpu(cpl->status);
#else
	ARG_UNUSED(cpl);
#endif
}

static inline
void nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	k_sem_take(&status->sem, K_FOREVER);
}

#define NVME_CPL_STATUS_POLL_INIT(cpl_status)			\
	{							\
		.status = 0,					\
		.sem = Z_SEM_INITIALIZER(cpl_status.sem, 0, 1),	\
	}

static inline
void nvme_cpl_status_poll_init(struct nvme_completion_poll_status *status)
{
	status->status = 0;
	k_sem_init(&status->sem, 0, 1);
}

#define nvme_completion_is_error(cpl)			\
	((NVME_STATUS_GET_SC((cpl)->status) != 0) |	\
	 (NVME_STATUS_GET_SCT((cpl)->status) != 0))

static inline
bool nvme_cpl_status_is_error(struct nvme_completion_poll_status *status)
{
	return ((status->status != 0) ||
		nvme_completion_is_error(&status->cpl));
}

#endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ */
@@ -94,8 +94,8 @@ static int nvme_controller_enable(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	mm_reg_t regs = DEVICE_MMIO_GET(dev);
-	uint32_t cc, csts, aqa, qsize;
-	uint8_t enabled, ready;
+	uint32_t cc, csts;
	int err;

	cc = nvme_mmio_read_4(regs, cc);
@@ -119,17 +119,6 @@ static int nvme_controller_enable(const struct device *dev)
		return err;
	}

-	nvme_mmio_write_8(regs, asq, nvme_ctrlr->adminq->cmd_bus_addr);
-	nvme_mmio_write_8(regs, acq, nvme_ctrlr->adminq->cpl_bus_addr);
-
-	/* acqs and asqs are 0-based. */
-	qsize = CONFIG_NVME_ADMIN_ENTRIES - 1;
-	aqa = 0;
-	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
-	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
-
-	nvme_mmio_write_4(regs, aqa, aqa);
-
	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
@@ -145,6 +134,119 @@ static int nvme_controller_enable(const struct device *dev)
	return nvme_controller_wait_for_ready(dev, 1);
}

static int nvme_controller_setup_admin_queues(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	mm_reg_t regs = DEVICE_MMIO_GET(dev);
	uint32_t aqa, qsize;

	nvme_cmd_qpair_reset(nvme_ctrlr->adminq);

	/* Admin queue is always id 0 */
	if (nvme_cmd_qpair_setup(nvme_ctrlr->adminq, nvme_ctrlr, 0) != 0) {
		LOG_ERR("Admin cmd qpair setup failed");
		return -EIO;
	}

	nvme_mmio_write_8(regs, asq, nvme_ctrlr->adminq->cmd_bus_addr);
	nvme_mmio_write_8(regs, acq, nvme_ctrlr->adminq->cpl_bus_addr);

	/* acqs and asqs are 0-based. */
	qsize = CONFIG_NVME_ADMIN_ENTRIES - 1;
	aqa = 0;
	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;

	nvme_mmio_write_4(regs, aqa, aqa);

	return 0;
}
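A quick worked example of the AQA packing (illustration only, assuming CONFIG_NVME_ADMIN_ENTRIES is 256 and the spec register layout of ASQS in bits 11:0 and ACQS in bits 27:16, which is what the masks and shifts from nvme_helpers.h are expected to encode):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qsize = 256 - 1;		/* ACQS/ASQS are 0-based -> 0xFF */
	uint32_t aqa = ((qsize & 0xFFF) << 16)	/* ACQS, bits 27:16 */
		     | (qsize & 0xFFF);		/* ASQS, bits 11:0  */

	printf("AQA = 0x%08X\n", aqa);		/* prints 0x00FF00FF */
	return 0;
}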

static int nvme_controller_setup_io_queues(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	struct nvme_completion_poll_status status;
	struct nvme_cmd_qpair *io_qpair;
	int cq_allocated, sq_allocated;
	int ret, idx;

	nvme_cpl_status_poll_init(&status);

	ret = nvme_ctrlr_cmd_set_num_queues(nvme_ctrlr,
					    nvme_ctrlr->num_io_queues,
					    nvme_completion_poll_cb, &status);
	if (ret != 0) {
		return ret;
	}

	nvme_completion_poll(&status);
	if (nvme_cpl_status_is_error(&status)) {
		LOG_ERR("Could not set IO num queues to %u",
			nvme_ctrlr->num_io_queues);
		return -EIO;
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	nvme_ctrlr->num_io_queues = MIN(nvme_ctrlr->num_io_queues,
					sq_allocated);
	nvme_ctrlr->num_io_queues = MIN(nvme_ctrlr->num_io_queues,
					cq_allocated);

	for (idx = 0; idx < nvme_ctrlr->num_io_queues; idx++) {
		io_qpair = &nvme_ctrlr->ioq[idx];
		if (nvme_cmd_qpair_setup(io_qpair, nvme_ctrlr, idx + 1) != 0) {
			LOG_ERR("IO cmd qpair %u setup failed", idx + 1);
			return -EIO;
		}

		nvme_cmd_qpair_reset(io_qpair);

		nvme_cpl_status_poll_init(&status);

		ret = nvme_ctrlr_cmd_create_io_cq(nvme_ctrlr, io_qpair,
						  nvme_completion_poll_cb,
						  &status);
		if (ret != 0) {
			return ret;
		}

		nvme_completion_poll(&status);
		if (nvme_cpl_status_is_error(&status)) {
			LOG_ERR("IO CQ creation failed");
			return -EIO;
		}

		nvme_cpl_status_poll_init(&status);

		ret = nvme_ctrlr_cmd_create_io_sq(nvme_ctrlr, io_qpair,
						  nvme_completion_poll_cb,
						  &status);
		if (ret != 0) {
			return ret;
		}

		nvme_completion_poll(&status);
		if (nvme_cpl_status_is_error(&status)) {
			LOG_ERR("IO SQ creation failed");
			return -EIO;
		}
	}

	return 0;
}

static void nvme_controller_gather_info(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;

@@ -262,11 +364,43 @@ static int nvme_controller_pcie_configure(const struct device *dev)
	return 0;
}

static int nvme_controller_identify(struct nvme_controller *nvme_ctrlr)
{
	struct nvme_completion_poll_status status =
		NVME_CPL_STATUS_POLL_INIT(status);

	nvme_ctrlr_cmd_identify_controller(nvme_ctrlr,
					   nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);

	if (nvme_cpl_status_is_error(&status)) {
		LOG_ERR("Could not identify the controller");
		return -EIO;
	}

	nvme_controller_data_swapbytes(&nvme_ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (nvme_ctrlr->cdata.mdts > 0) {
		nvme_ctrlr->max_xfer_size =
			MIN(nvme_ctrlr->max_xfer_size,
			    1 << (nvme_ctrlr->cdata.mdts + NVME_MPS_SHIFT +
				  NVME_CAP_HI_MPSMIN(nvme_ctrlr->cap_hi)));
	}

	return 0;
}
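MDTS is expressed in powers of two of the controller's minimum page size. A quick check of the arithmetic above with illustrative values (assuming NVME_MPS_SHIFT is the 4 KiB page shift, 12, and CAP.MPSMIN is 0; these are assumptions, not values from the commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mdts = 5, mpsmin = 0;	/* controller-reported, illustrative */
	uint32_t max_xfer = 1 << (mdts + 12 + mpsmin);

	/* 2^5 pages of 4 KiB: transfers capped at 128 KiB */
	printf("max transfer = %u bytes\n", max_xfer);
	return 0;
}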

static int nvme_controller_init(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	int ret;

	nvme_cmd_init();

	nvme_ctrlr->dev = dev;

	ret = nvme_controller_pcie_configure(dev);

@@ -282,20 +416,40 @@ static int nvme_controller_init(const struct device *dev)
		return ret;
	}

	ret = nvme_controller_setup_admin_queues(dev);
	if (ret != 0) {
		return ret;
	}

	ret = nvme_controller_enable(dev);
	if (ret != 0) {
		LOG_ERR("Controller cannot be enabled");
		return ret;
	}

	ret = nvme_controller_setup_io_queues(dev);
	if (ret != 0) {
		return ret;
	}

	ret = nvme_controller_identify(nvme_ctrlr);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

#define NVME_CONTROLLER_DEVICE_INIT(n)					\
	DEVICE_PCIE_INST_DECLARE(n);					\
	NVME_ADMINQ_ALLOCATE(n, CONFIG_NVME_ADMIN_ENTRIES);		\
	NVME_IOQ_ALLOCATE(n, CONFIG_NVME_IO_ENTRIES);			\
									\
	static struct nvme_controller nvme_ctrlr_data_##n = {		\
		.id = n,						\
		.num_io_queues = CONFIG_NVME_IO_QUEUES,			\
		.adminq = &admin_##n,					\
		.ioq = &io_##n,						\
	};								\
									\
	static struct nvme_controller_config nvme_ctrlr_cfg_##n =	\
drivers/disk/nvme/nvme_controller_cmd.c (new file, 198 lines)
@@ -0,0 +1,198 @@
/*
 * SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2022 Intel Corp.
 */

#define LOG_LEVEL CONFIG_NVME_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nvme_ctrlr_cmd);

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>

#include <string.h>

#include "nvme.h"
#include "nvme_helpers.h"

int nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
				       nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;

	request = nvme_allocate_request_vaddr(
		&ctrlr->cdata, sizeof(struct nvme_controller_data),
		cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	memset(&request->cmd, 0, sizeof(request->cmd));
	request->cmd.cdw0.opc = NVME_OPC_IDENTIFY;
	/* CNS 1: identify controller data structure */
	request->cmd.cdw10 = sys_cpu_to_le32(1);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = sys_cpu_to_le32(((io_queue->num_entries - 1) << 16) |
				     io_queue->id);
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = sys_cpu_to_le32((io_queue->vector << 16) | 0x3);
	cmd->dptr.prp1 = sys_cpu_to_le64(io_queue->cpl_bus_addr);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = sys_cpu_to_le32(((io_queue->num_entries - 1) << 16) |
				     io_queue->id);
	/* 0x1 = physically contiguous; upper half is the paired CQ id */
	cmd->cdw11 = sys_cpu_to_le32((io_queue->id << 16) | 0x1);
	cmd->dptr.prp1 = sys_cpu_to_le64(io_queue->cmd_bus_addr);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_DELETE_IO_CQ;

	/*
	 * TODO: create a delete io completion queue command data
	 * structure.
	 */
	cmd->cdw10 = sys_cpu_to_le32(io_queue->id);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				struct nvme_cmd_qpair *io_queue,
				nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 * structure.
	 */
	cmd->cdw10 = sys_cpu_to_le32(io_queue->id);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       uint32_t cdw12, uint32_t cdw13,
			       uint32_t cdw14, uint32_t cdw15,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_SET_FEATURES;
	cmd->cdw10 = sys_cpu_to_le32(feature);
	cmd->cdw11 = sys_cpu_to_le32(cdw11);
	cmd->cdw12 = sys_cpu_to_le32(cdw12);
	cmd->cdw13 = sys_cpu_to_le32(cdw13);
	cmd->cdw14 = sys_cpu_to_le32(cdw14);
	cmd->cdw15 = sys_cpu_to_le32(cdw15);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
			       uint8_t feature, uint32_t cdw11,
			       void *payload, uint32_t payload_size,
			       nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *request;
	struct nvme_command *cmd;

	request = nvme_allocate_request_null(cb_fn, cb_arg);
	if (!request) {
		return -ENOMEM;
	}

	cmd = &request->cmd;
	cmd->cdw0.opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = sys_cpu_to_le32(feature);
	cmd->cdw11 = sys_cpu_to_le32(cdw11);

	return nvme_cmd_qpair_submit_request(ctrlr->adminq, request);
}

int nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				  uint32_t num_queues,
				  nvme_cb_fn_t cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);

	return nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES,
					  cdw11, 0, 0, 0, 0, NULL, 0,
					  cb_fn, cb_arg);
}
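The Number of Queues feature is 0-based in both directions: cdw11 packs the requested completion queue count minus one in the upper half and the requested submission queue count minus one in the lower half, and the completion's cdw0 comes back in the same format (decoded in nvme_controller_setup_io_queues above). A quick illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_queues = 4;

	/* Request: both counts 0-based -> 0x00030003 for 4 SQs and 4 CQs */
	uint32_t cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);

	/* Reply: suppose the controller grants 8 SQs and 6 CQs */
	uint32_t cdw0 = ((6 - 1) << 16) | (8 - 1);
	uint32_t sq_allocated = (cdw0 & 0xFFFF) + 1;
	uint32_t cq_allocated = (cdw0 >> 16) + 1;

	printf("cdw11=0x%08X sq=%u cq=%u\n", cdw11, sq_allocated, cq_allocated);
	return 0;
}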