Subject: Re: [PATCH v4 05/18] nitro_enclaves: Handle PCI device command requests


On 02/07/2020 18:19, Alexander Graf wrote:
>
>
> On 22.06.20 22:03, Andra Paraschiv wrote:
>> The Nitro Enclaves PCI device exposes an MMIO space that this driver
>> uses to submit command requests and to receive command replies e.g. for
>> enclave creation / termination or setting enclave resources.
>>
>> Add logic for handling PCI device command requests based on the given
>> command type.
>>
>> Register an MSI-X interrupt vector for command reply notifications to
>> handle this type of communication event.
>>
>> Signed-off-by: Alexandru-Catalin Vasile <lexnv@amazon.com>
>> Signed-off-by: Andra Paraschiv <andraprs@amazon.com>
>>
>> Fix issue reported in:
>> https://lore.kernel.org/lkml/202004231644.xTmN4Z1z%25lkp@intel.com/
>>
>> Reported-by: kbuild test robot <lkp@intel.com>
>> Signed-off-by: Andra Paraschiv <andraprs@amazon.com>
>> ---
>> Changelog
>>
>> v3 -> v4
>>
>> * Use dev_err instead of custom NE log pattern.
>> * Return IRQ_NONE when interrupts are not handled.
>>
>> v2 -> v3
>>
>> * Remove the WARN_ON calls.
>> * Update static calls sanity checks.
>> * Remove "ratelimited" from the logs that are not in the ioctl call
>>    paths.
>>
>> v1 -> v2
>>
>> * Add log pattern for NE.
>> * Remove the BUG_ON calls.
>> * Update goto labels to match their purpose.
>> * Add fix for kbuild report.
>> ---
>>   drivers/virt/nitro_enclaves/ne_pci_dev.c | 232 +++++++++++++++++++++++
>>   1 file changed, 232 insertions(+)
>>
>> diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
>> index 235fa3ecbee2..c24230cfe7c0 100644
>> --- a/drivers/virt/nitro_enclaves/ne_pci_dev.c
>> +++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
>> @@ -27,6 +27,218 @@ static const struct pci_device_id ne_pci_ids[] = {
>>     MODULE_DEVICE_TABLE(pci, ne_pci_ids);
>>   +/**
>> + * ne_submit_request - Submit command request to the PCI device based on the
>> + * command type.
>> + *
>> + * This function gets called with the ne_pci_dev mutex held.
>> + *
>> + * @pdev: PCI device to send the command to.
>> + * @cmd_type: command type of the request sent to the PCI device.
>> + * @cmd_request: command request payload.
>> + * @cmd_request_size: size of the command request payload.
>> + *
>> + * @returns: 0 on success, negative return value on failure.
>> + */
>> +static int ne_submit_request(struct pci_dev *pdev,
>> +                 enum ne_pci_dev_cmd_type cmd_type,
>> +                 void *cmd_request, size_t cmd_request_size)
>> +{
>> +    struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
>> +
>> +    if (!ne_pci_dev || !ne_pci_dev->iomem_base)
>> +        return -EINVAL;
>
> How can this ever happen?

Removed this check and the following ones in v5 of the patch series.
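
For reference, this is roughly how the helper ends up in v5 with the redundant checks dropped (a sketch only; the exact v5 code may differ in details):

    static int ne_submit_request(struct pci_dev *pdev,
                                 enum ne_pci_dev_cmd_type cmd_type,
                                 void *cmd_request, size_t cmd_request_size)
    {
        /*
         * The driver data is set during probe, before the ioctl paths can
         * reach this helper, so no NULL checks are needed here.
         */
        struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);

        /* Copy the request payload into the device's send buffer ... */
        memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request,
                    cmd_request_size);

        /* ... and kick off command handling by writing the command type. */
        iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);

        return 0;
    }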

Thanks,
Andra

>
>> +
>> +    memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request,
>> +            cmd_request_size);
>> +
>> +    iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);
>> +
>> +    return 0;
>> +}
>> +
>> +/**
>> + * ne_retrieve_reply - Retrieve reply from the PCI device.
>> + *
>> + * This function gets called with the ne_pci_dev mutex held.
>> + *
>> + * @pdev: PCI device to receive the reply from.
>> + * @cmd_reply: command reply payload.
>> + * @cmd_reply_size: size of the command reply payload.
>> + *
>> + * @returns: 0 on success, negative return value on failure.
>> + */
>> +static int ne_retrieve_reply(struct pci_dev *pdev,
>> +                 struct ne_pci_dev_cmd_reply *cmd_reply,
>> +                 size_t cmd_reply_size)
>> +{
>> +    struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
>> +
>> +    if (!ne_pci_dev || !ne_pci_dev->iomem_base)
>> +        return -EINVAL;
>
> Same.
>
>> +
>> +    memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA,
>> +              cmd_reply_size);
>> +
>> +    return 0;
>> +}
>> +
>> +/**
>> + * ne_wait_for_reply - Wait for a reply of a PCI command.
>> + *
>> + * This function gets called with the ne_pci_dev mutex held.
>> + *
>> + * @pdev: PCI device for which a reply is waited.
>> + *
>> + * @returns: 0 on success, negative return value on failure.
>> + */
>> +static int ne_wait_for_reply(struct pci_dev *pdev)
>> +{
>> +    struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
>> +    int rc = -EINVAL;
>
> Unused assignment?
>
>> +
>> +    if (!ne_pci_dev)
>> +        return -EINVAL;
>
> Same.
>
>> +
>> +    /*
>> +     * TODO: Update to _interruptible and handle interrupted wait event
>> +     * e.g. -ERESTARTSYS, incoming signals + add / update timeout.
>> +     */
>> +    rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q,
>> +                atomic_read(&ne_pci_dev->cmd_reply_avail) != 0,
>> +                msecs_to_jiffies(NE_DEFAULT_TIMEOUT_MSECS));
>> +    if (!rc)
>> +        return -ETIMEDOUT;
>> +
>> +    return 0;
>> +}
>> +
>> +int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
>> +          void *cmd_request, size_t cmd_request_size,
>> +          struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
>> +{
>> +    struct ne_pci_dev *ne_pci_dev = NULL;
>> +    int rc = -EINVAL;
>> +
>> +    if (!pdev)
>> +        return -ENODEV;
>
> When can this happen?
>
>> +
>> +    ne_pci_dev = pci_get_drvdata(pdev);
>> +    if (!ne_pci_dev || !ne_pci_dev->iomem_base)
>> +        return -EINVAL;
>
> Same
>
>> +
>> +    if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
>> +        dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n",
>> +                    cmd_type);
>> +
>> +        return -EINVAL;
>> +    }
>> +
>> +    if (!cmd_request) {
>> +        dev_err_ratelimited(&pdev->dev, "Null cmd request\n");
>> +
>> +        return -EINVAL;
>> +    }
>> +
>> +    if (cmd_request_size > NE_SEND_DATA_SIZE) {
>> +        dev_err_ratelimited(&pdev->dev,
>> +                    "Invalid req size=%zu for cmd type=%u\n",
>> +                    cmd_request_size, cmd_type);
>> +
>> +        return -EINVAL;
>> +    }
>> +
>> +    if (!cmd_reply) {
>> +        dev_err_ratelimited(&pdev->dev, "Null cmd reply\n");
>> +
>> +        return -EINVAL;
>> +    }
>> +
>> +    if (cmd_reply_size > NE_RECV_DATA_SIZE) {
>> +        dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu\n",
>> +                    cmd_reply_size);
>> +
>> +        return -EINVAL;
>> +    }
>> +
>> +    /*
>> +     * Use this mutex so that the PCI device handles one command request at
>> +     * a time.
>> +     */
>> +    mutex_lock(&ne_pci_dev->pci_dev_mutex);
>> +
>> +    atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
>> +
>> +    rc = ne_submit_request(pdev, cmd_type, cmd_request,
>> +               cmd_request_size);
>> +    if (rc < 0) {
>> +        dev_err_ratelimited(&pdev->dev,
>> +                    "Error in submit request [rc=%d]\n", rc);
>> +
>> +        goto unlock_mutex;
>> +    }
>> +
>> +    rc = ne_wait_for_reply(pdev);
>> +    if (rc < 0) {
>> +        dev_err_ratelimited(&pdev->dev,
>> +                    "Error in wait for reply [rc=%d]\n", rc);
>> +
>> +        goto unlock_mutex;
>> +    }
>> +
>> +    rc = ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);
>> +    if (rc < 0) {
>> +        dev_err_ratelimited(&pdev->dev,
>> +                    "Error in retrieve reply [rc=%d]\n", rc);
>> +
>> +        goto unlock_mutex;
>> +    }
>> +
>> +    atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
>> +
>> +    if (cmd_reply->rc < 0) {
>> +        dev_err_ratelimited(&pdev->dev,
>> +                    "Error in cmd process logic [rc=%d]\n",
>> +                    cmd_reply->rc);
>> +
>> +        rc = cmd_reply->rc;
>> +
>> +        goto unlock_mutex;
>> +    }
>> +
>> +    mutex_unlock(&ne_pci_dev->pci_dev_mutex);
>> +
>> +    return 0;
>
> Can you just set rc to 0 and fall through?

Done.
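
For v5, the tail of ne_do_request() now sets rc on the success path and falls through to a single unlock, roughly like this (a sketch; the real patch may differ in formatting):

        /* All earlier error paths set rc and jump to unlock_mutex. */
        rc = 0;

    unlock_mutex:
        mutex_unlock(&ne_pci_dev->pci_dev_mutex);

        return rc;
    }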

>
>> +
>> +unlock_mutex:
>> +    mutex_unlock(&ne_pci_dev->pci_dev_mutex);
>> +
>> +    return rc;
>> +}
>> +
>> +/**
>> + * ne_reply_handler - Interrupt handler for retrieving a reply matching
>> + * a request sent to the PCI device for enclave lifetime management.
>> + *
>> + * @irq: received interrupt for a reply sent by the PCI device.
>> + * @args: PCI device private data structure.
>> + *
>> + * @returns: IRQ_HANDLED on handled interrupt, IRQ_NONE otherwise.
>> + */
>> +static irqreturn_t ne_reply_handler(int irq, void *args)
>> +{
>> +    struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
>> +
>> +    if (!ne_pci_dev)
>> +        return IRQ_NONE;
>
> How can this ever happen?
>
>
> Alex
>
>> +
>> +    atomic_set(&ne_pci_dev->cmd_reply_avail, 1);
>> +
>> +    /* TODO: Update to _interruptible. */
>> +    wake_up(&ne_pci_dev->cmd_reply_wait_q);
>> +
>> +    return IRQ_HANDLED;
>> +}
>> +
>>   /**
>>    * ne_setup_msix - Setup MSI-X vectors for the PCI device.
>>    *
>> @@ -59,7 +271,25 @@ static int ne_setup_msix(struct pci_dev *pdev)
>>           return rc;
>>       }
>>   +    /*
>> +     * This IRQ gets triggered every time the PCI device responds to a
>> +     * command request. The reply is then retrieved, reading from the MMIO
>> +     * space of the PCI device.
>> +     */
>> +    rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY),
>> +             ne_reply_handler, 0, "enclave_cmd", ne_pci_dev);
>> +    if (rc < 0) {
>> +        dev_err(&pdev->dev, "Error in request irq reply [rc=%d]\n",
>> +            rc);
>> +
>> +        goto free_irq_vectors;
>> +    }
>> +
>>       return 0;
>> +
>> +free_irq_vectors:
>> +    pci_free_irq_vectors(pdev);
>> +
>> +    return rc;
>>   }
>>     /**
>> @@ -74,6 +304,8 @@ static void ne_teardown_msix(struct pci_dev *pdev)
>>       if (!ne_pci_dev)
>>           return;
>>   +    free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
>> +
>>       pci_free_irq_vectors(pdev);
>>   }
>>




