Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild     |   3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c     | 185
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c     | 216
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 410
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c     |  56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h     |  14
6 files changed, 474 insertions, 410 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index b5665ada850a..004d1cd4e37e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
 nvkm-y += nvkm/falcon/v1.o
 nvkm-y += nvkm/falcon/msgqueue.o
 nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 000000000000..4785a563c183
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static bool
+cmd_queue_has_room(struct nvkm_msgqueue *priv,
+		   struct nvkm_msgqueue_queue *queue, u32 size, bool *rewind)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	u32 head = nvkm_falcon_rd32(falcon, queue->head_reg);
+	u32 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
+	u32 free;
+
+	size = ALIGN(size, QUEUE_ALIGNMENT);
+
+	if (head >= tail) {
+		free = queue->offset + queue->size - head;
+		free -= HDR_SIZE;
+
+		if (size > free) {
+			*rewind = true;
+			head = queue->offset;
+		}
+	}
+
+	if (head < tail)
+		free = tail - head - 1;
+
+	return size <= free;
+}
+
+static int
+cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+	       void *data, u32 size)
+{
+	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
+	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+	return 0;
+}
+
+/* REWIND unit is always 0x00 */
+#define MSGQUEUE_UNIT_REWIND 0x00
+
+static void
+cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	struct nvkm_msgqueue_hdr cmd;
+	int err;
+
+	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
+	cmd.size = sizeof(cmd);
+	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
+	if (err)
+		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
+	else
+		nvkm_error(subdev, "queue %d rewinded\n", queue->index);
+
+	queue->position = queue->offset;
+}
+
+static int
+cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+	       u32 size)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	bool rewind = false;
+
+	mutex_lock(&queue->mutex);
+
+	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
+		nvkm_error(subdev, "queue full\n");
+		mutex_unlock(&queue->mutex);
+		return -EAGAIN;
+	}
+
+	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
+
+	if (rewind)
+		cmd_queue_rewind(priv, queue);
+
+	return 0;
+}
+
+static void
+cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+		bool commit)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+
+	if (commit)
+		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
+
+	mutex_unlock(&queue->mutex);
+}
+
+static int
+cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
+	  struct nvkm_msgqueue_queue *queue)
+{
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	static unsigned timeout = 2000;
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+	int ret = -EAGAIN;
+	bool commit = true;
+
+	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+		ret = cmd_queue_open(priv, queue, cmd->size);
+	if (ret) {
+		nvkm_error(subdev, "pmu_queue_open_write failed\n");
+		return ret;
+	}
+
+	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
+	if (ret) {
+		nvkm_error(subdev, "pmu_queue_push failed\n");
+		commit = false;
+	}
+
+	cmd_queue_close(priv, queue, commit);
+	return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
+		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
+		   struct completion *completion, bool wait_init)
+{
+	struct nvkm_msgqueue_seq *seq;
+	struct nvkm_msgqueue_queue *queue;
+	int ret;
+
+	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
+						      msecs_to_jiffies(1000)))
+		return -ETIMEDOUT;
+
+	queue = priv->func->cmd_queue(priv, prio);
+	if (IS_ERR(queue))
+		return PTR_ERR(queue);
+
+	seq = msgqueue_seq_acquire(priv);
+	if (IS_ERR(seq))
+		return PTR_ERR(seq);
+
+	cmd->seq_id = seq->id;
+	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+	seq->callback = cb;
+	seq->state = SEQ_STATE_USED;
+	seq->completion = completion;
+
+	ret = cmd_write(priv, cmd, queue);
+	if (ret) {
+		seq->state = SEQ_STATE_PENDING;
+		msgqueue_seq_release(priv, seq);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 000000000000..5db0b41bc2c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static int
+msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	mutex_lock(&queue->mutex);
+	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
+	return 0;
+}
+
+static void
+msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+		bool commit)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+
+	if (commit)
+		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
+
+	mutex_unlock(&queue->mutex);
+}
+
+static bool
+msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	u32 head = nvkm_falcon_rd32(falcon, queue->head_reg);
+	u32 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
+	return head == tail;
+}
+
+static int
+msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+	      void *data, u32 size)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	u32 head, tail, available;
+
+	head = nvkm_falcon_rd32(falcon, queue->head_reg);
+	/* has the buffer looped? */
+	if (head < queue->position)
+		queue->position = queue->offset;
+
+	tail = queue->position;
+
+	available = head - tail;
+
+	if (available == 0) {
+		nvkm_warn(subdev, "no message data available\n");
+		return 0;
+	}
+
+	if (size > available) {
+		nvkm_warn(subdev, "message data smaller than read request\n");
+		size = available;
+	}
+
+	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
+	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+	return size;
+}
+
+static int
+msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+	       struct nvkm_msgqueue_hdr *hdr)
+{
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	int ret;
+
+	ret = msg_queue_open(priv, queue);
+	if (ret) {
+		nvkm_error(subdev, "fail to open queue %d\n", queue->index);
+		return ret;
+	}
+
+	if (msg_queue_empty(priv, queue)) {
+		ret = 0;
+		goto close;
+	}
+
+	ret = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
+	if (ret >= 0 && ret != HDR_SIZE)
+		ret = -EINVAL;
+	if (ret < 0) {
+		nvkm_error(subdev, "failed to read message header: %d\n", ret);
+		goto close;
+	}
+
+	if (hdr->size > MSG_BUF_SIZE) {
+		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+		ret = -ENOSPC;
+		goto close;
+	}
+
+	if (hdr->size > HDR_SIZE) {
+		u32 read_size = hdr->size - HDR_SIZE;
+
+		ret = msg_queue_pop(priv, queue, (hdr + 1), read_size);
+		if (ret >= 0 && ret != read_size)
+			ret = -EINVAL;
+		if (ret < 0) {
+			nvkm_error(subdev, "failed to read message: %d\n", ret);
+			goto close;
+		}
+	}
+
+close:
+	msg_queue_close(priv, queue, (ret >= 0));
+	return ret;
+}
+
+static int
+msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
+{
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	struct nvkm_msgqueue_seq *seq;
+
+	seq = &priv->seq[hdr->seq_id];
+	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
+		return -EINVAL;
+	}
+
+	if (seq->state == SEQ_STATE_USED) {
+		if (seq->callback)
+			seq->callback(priv, hdr);
+	}
+
+	if (seq->completion)
+		complete(seq->completion);
+
+	msgqueue_seq_release(priv, seq);
+	return 0;
+}
+
+static int
+msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
+			 struct nvkm_msgqueue_hdr *hdr)
+{
+	struct nvkm_falcon *falcon = priv->falcon;
+	const struct nvkm_subdev *subdev = falcon->owner;
+	const u32 tail_reg = falcon->func->msgq.tail;
+	u32 tail;
+	int ret;
+
+	/*
+	 * Read the message - queues are not initialized yet so we cannot rely
+	 * on msg_queue_read()
+	 */
+	tail = nvkm_falcon_rd32(falcon, tail_reg);
+	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
+
+	if (hdr->size > MSG_BUF_SIZE) {
+		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+		return -ENOSPC;
+	}
+
+	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
+			      (hdr + 1));
+
+	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
+	nvkm_falcon_wr32(falcon, tail_reg, tail);
+
+	ret = priv->func->init_func->init_callback(priv, hdr);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
+			   struct nvkm_msgqueue_queue *queue)
+{
+	/*
+	 * We are invoked from a worker thread, so normally we have plenty of
+	 * stack space to work with.
+	 */
+	u8 msg_buffer[MSG_BUF_SIZE];
+	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
+	int ret;
+
+	/* the first message we receive must be the init message */
+	if ((!priv->init_msg_received)) {
+		ret = msgqueue_handle_init_msg(priv, hdr);
+		if (!ret)
+			priv->init_msg_received = true;
+	} else {
+		while (msg_queue_read(priv, queue, hdr) > 0)
+			msgqueue_msg_handle(priv, hdr);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 4d7039d8e498..eb2af85f3e3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -26,416 +26,6 @@
 #include <subdev/secboot.h>
-
-#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
-#define QUEUE_ALIGNMENT 4
-/* max size of the messages we can receive */
-#define MSG_BUF_SIZE 128
-
-static int
-msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	mutex_lock(&queue->mutex);
-
-	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	return 0;
-}
-
-static void
-msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		bool commit)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	if (commit)
-		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
-
-	mutex_unlock(&queue->mutex);
-}
-
-static bool
-msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	u32 head, tail;
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	return head == tail;
-}
-
-static int
-msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	      void *data, u32 size)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	u32 head, tail, available;
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	/* has the buffer looped? */
-	if (head < queue->position)
-		queue->position = queue->offset;
-
-	tail = queue->position;
-
-	available = head - tail;
-
-	if (available == 0) {
-		nvkm_warn(subdev, "no message data available\n");
-		return 0;
-	}
-
-	if (size > available) {
-		nvkm_warn(subdev, "message data smaller than read request\n");
-		size = available;
-	}
-
-	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
-	return size;
-}
-
-static int
-msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       struct nvkm_msgqueue_hdr *hdr)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	int err;
-
-	err = msg_queue_open(priv, queue);
-	if (err) {
-		nvkm_error(subdev, "fail to open queue %d\n", queue->index);
-		return err;
-	}
-
-	if (msg_queue_empty(priv, queue)) {
-		err = 0;
-		goto close;
-	}
-
-	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
-	if (err >= 0 && err != HDR_SIZE)
-		err = -EINVAL;
-	if (err < 0) {
-		nvkm_error(subdev, "failed to read message header: %d\n", err);
-		goto close;
-	}
-
-	if (hdr->size > MSG_BUF_SIZE) {
-		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
-		err = -ENOSPC;
-		goto close;
-	}
-
-	if (hdr->size > HDR_SIZE) {
-		u32 read_size = hdr->size - HDR_SIZE;
-
-		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
-		if (err >= 0 && err != read_size)
-			err = -EINVAL;
-		if (err < 0) {
-			nvkm_error(subdev, "failed to read message: %d\n", err);
-			goto close;
-		}
-	}
-
-close:
-	msg_queue_close(priv, queue, (err >= 0));
-
-	return err;
-}
-
-static bool
-cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		   u32 size, bool *rewind)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	u32 head, tail, free;
-
-	size = ALIGN(size, QUEUE_ALIGNMENT);
-
-	head = nvkm_falcon_rd32(falcon, queue->head_reg);
-	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
-	if (head >= tail) {
-		free = queue->offset + queue->size - head;
-		free -= HDR_SIZE;
-
-		if (size > free) {
-			*rewind = true;
-			head = queue->offset;
-		}
-	}
-
-	if (head < tail)
-		free = tail - head - 1;
-
-	return size <= free;
-}
-
-static int
-cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       void *data, u32 size)
-{
-	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
-	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
-	return 0;
-}
-
-/* REWIND unit is always 0x00 */
-#define MSGQUEUE_UNIT_REWIND 0x00
-
-static void
-cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_hdr cmd;
-	int err;
-
-	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
-	cmd.size = sizeof(cmd);
-	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
-	if (err)
-		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
-	else
-		nvkm_error(subdev, "queue %d rewinded\n", queue->index);
-
-	queue->position = queue->offset;
-}
-
-static int
-cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-	       u32 size)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	bool rewind = false;
-
-	mutex_lock(&queue->mutex);
-
-	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
-		nvkm_error(subdev, "queue full\n");
-		mutex_unlock(&queue->mutex);
-		return -EAGAIN;
-	}
-
-	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
-
-	if (rewind)
-		cmd_queue_rewind(priv, queue);
-
-	return 0;
-}
-
-static void
-cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
-		bool commit)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-
-	if (commit)
-		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
-
-	mutex_unlock(&queue->mutex);
-}
-
-static int
-cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
-	  struct nvkm_msgqueue_queue *queue)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	static unsigned timeout = 2000;
-	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
-	int ret = -EAGAIN;
-	bool commit = true;
-
-	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
-		ret = cmd_queue_open(priv, queue, cmd->size);
-	if (ret) {
-		nvkm_error(subdev, "pmu_queue_open_write failed\n");
-		return ret;
-	}
-
-	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
-	if (ret) {
-		nvkm_error(subdev, "pmu_queue_push failed\n");
-		commit = false;
-	}
-
-	cmd_queue_close(priv, queue, commit);
-
-	return ret;
-}
-
-static struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_seq *seq;
-	u32 index;
-
-	mutex_lock(&priv->seq_lock);
-
-	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-
-	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
-		nvkm_error(subdev, "no free sequence available\n");
-		mutex_unlock(&priv->seq_lock);
-		return ERR_PTR(-EAGAIN);
-	}
-
-	set_bit(index, priv->seq_tbl);
-
-	mutex_unlock(&priv->seq_lock);
-
-	seq = &priv->seq[index];
-	seq->state = SEQ_STATE_PENDING;
-
-	return seq;
-}
-
-static void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
-{
-	/* no need to acquire seq_lock since clear_bit is atomic */
-	seq->state = SEQ_STATE_FREE;
-	seq->callback = NULL;
-	seq->completion = NULL;
-	clear_bit(seq->id, priv->seq_tbl);
-}
-
-/* specifies that we want to know the command status in the answer message */
-#define CMD_FLAGS_STATUS BIT(0)
-/* specifies that we want an interrupt when the answer message is queued */
-#define CMD_FLAGS_INTR BIT(1)
-
-int
-nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
-		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
-		   struct completion *completion, bool wait_init)
-{
-	struct nvkm_msgqueue_seq *seq;
-	struct nvkm_msgqueue_queue *queue;
-	int ret;
-
-	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
-						      msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	queue = priv->func->cmd_queue(priv, prio);
-	if (IS_ERR(queue))
-		return PTR_ERR(queue);
-
-	seq = msgqueue_seq_acquire(priv);
-	if (IS_ERR(seq))
-		return PTR_ERR(seq);
-
-	cmd->seq_id = seq->id;
-	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
-
-	seq->callback = cb;
-	seq->state = SEQ_STATE_USED;
-	seq->completion = completion;
-
-	ret = cmd_write(priv, cmd, queue);
-	if (ret) {
-		seq->state = SEQ_STATE_PENDING;
-		msgqueue_seq_release(priv, seq);
-	}
-
-	return ret;
-}
-
-static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
-{
-	const struct nvkm_subdev *subdev = priv->falcon->owner;
-	struct nvkm_msgqueue_seq *seq;
-
-	seq = &priv->seq[hdr->seq_id];
-	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
-		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
-		return -EINVAL;
-	}
-
-	if (seq->state == SEQ_STATE_USED) {
-		if (seq->callback)
-			seq->callback(priv, hdr);
-	}
-
-	if (seq->completion)
-		complete(seq->completion);
-
-	msgqueue_seq_release(priv, seq);
-
-	return 0;
-}
-
-static int
-msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
-			 struct nvkm_msgqueue_hdr *hdr)
-{
-	struct nvkm_falcon *falcon = priv->falcon;
-	const struct nvkm_subdev *subdev = falcon->owner;
-	const u32 tail_reg = falcon->func->msgq.tail;
-	u32 tail;
-	int ret;
-
-	/*
-	 * Read the message - queues are not initialized yet so we cannot rely
-	 * on msg_queue_read()
-	 */
-	tail = nvkm_falcon_rd32(falcon, tail_reg);
-	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
-
-	if (hdr->size > MSG_BUF_SIZE) {
-		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
-		return -ENOSPC;
-	}
-
-	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
-			      (hdr + 1));
-
-	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
-	nvkm_falcon_wr32(falcon, tail_reg, tail);
-
-	ret = priv->func->init_func->init_callback(priv, hdr);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-void
-nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
-			   struct nvkm_msgqueue_queue *queue)
-{
-	/*
-	 * We are invoked from a worker thread, so normally we have plenty of
-	 * stack space to work with.
-	 */
-	u8 msg_buffer[MSG_BUF_SIZE];
-	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
-	int ret;
-
-	/* the first message we receive must be the init message */
-	if ((!priv->init_msg_received)) {
-		ret = msgqueue_handle_init_msg(priv, hdr);
-		if (!ret)
-			priv->init_msg_received = true;
-	} else {
-		while (msg_queue_read(priv, queue, hdr) > 0)
-			msgqueue_msg_handle(priv, hdr);
-	}
-}
-
 void
 nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 000000000000..edf941559aec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_msgqueue_seq *
+msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
+{
+	const struct nvkm_subdev *subdev = priv->falcon->owner;
+	struct nvkm_msgqueue_seq *seq;
+	u32 index;
+
+	mutex_lock(&priv->seq_lock);
+	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
+	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
+		nvkm_error(subdev, "no free sequence available\n");
+		mutex_unlock(&priv->seq_lock);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	set_bit(index, priv->seq_tbl);
+	mutex_unlock(&priv->seq_lock);
+
+	seq = &priv->seq[index];
+	seq->state = SEQ_STATE_PENDING;
+	return seq;
+}
+
+void
+msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
+{
+	/* no need to acquire seq_lock since clear_bit is atomic */
+	seq->state = SEQ_STATE_FREE;
+	seq->callback = NULL;
+	seq->completion = NULL;
+	clear_bit(seq->id, priv->seq_tbl);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 000000000000..db00c5ee6124
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+#include "msgqueue.h"
+
+#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+struct nvkm_msgqueue_seq *msgqueue_seq_acquire(struct nvkm_msgqueue *);
+void msgqueue_seq_release(struct nvkm_msgqueue *, struct nvkm_msgqueue_seq *);
+#endif