From: Jason Wang
Subject: Re: [PATCH for 8.0 v7 07/10] vdpa: Add asid parameter to vhost_vdpa_dma_map/unmap
Date: Thu, 17 Nov 2022 13:45:46 +0800
On Wed, Nov 16, 2022 at 11:06 PM Eugenio Pérez <[email protected]> wrote:
>
> So the caller can choose to which ASID the mapping is destined.
>
> No need to update the batch functions as they will always be called from
> memory listener updates at the moment. Memory listener updates will
> always update ASID 0, as it's the passthrough ASID.
>
> All vhost devices' ASIDs are 0 at this moment.
>
> Signed-off-by: Eugenio Pérez <[email protected]>
Acked-by: Jason Wang <[email protected]>
Thanks
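
As a quick illustration of the new calling convention (a sketch drawn from the
hunks below, not additional code; the surrounding variables are the ones the
patch already uses):

    /* guest memory coming from the memory listener stays in the passthrough ASID */
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);

    /* SVQ vrings and CVQ buffers use the ASID assigned to this vhost_vdpa;
     * after this patch that is still 0 for every device */
    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
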
> ---
> v7:
> * Move comment on zero initialization of vhost_vdpa_dma_map above the
> functions.
> * Add VHOST_VDPA_GUEST_PA_ASID macro.
>
> v5:
> * Solve conflict, now vhost_vdpa_svq_unmap_ring returns void
> * Change comment on zero initialization.
>
> v4: Add comment specifying behavior if device does not support _F_ASID
>
> v3: Deleted unneeded space
> ---
> include/hw/virtio/vhost-vdpa.h | 14 ++++++++++---
> hw/virtio/vhost-vdpa.c | 36 +++++++++++++++++++++++-----------
> net/vhost-vdpa.c | 6 +++---
> hw/virtio/trace-events | 4 ++--
> 4 files changed, 41 insertions(+), 19 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 1111d85643..e57dfa1fd1 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -19,6 +19,12 @@
> #include "hw/virtio/virtio.h"
> #include "standard-headers/linux/vhost_types.h"
>
> +/*
> + * ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to
> + * qemu's IOVA. If SVQ is enabled it maps also the SVQ vring here
> + */
> +#define VHOST_VDPA_GUEST_PA_ASID 0
> +
> typedef struct VhostVDPAHostNotifier {
> MemoryRegion mr;
> void *addr;
> @@ -29,6 +35,7 @@ typedef struct vhost_vdpa {
> int index;
> uint32_t msg_type;
> bool iotlb_batch_begin_sent;
> + uint32_t address_space_id;
> MemoryListener listener;
> struct vhost_vdpa_iova_range iova_range;
> uint64_t acked_features;
> @@ -42,8 +49,9 @@ typedef struct vhost_vdpa {
> VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> } VhostVDPA;
>
> -int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> - void *vaddr, bool readonly);
> -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
> +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> + hwaddr size, void *vaddr, bool readonly);
> +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> + hwaddr size);
>
> #endif
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 23efb8f49d..1e4e1cb523 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -72,22 +72,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> return false;
> }
>
> -int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> - void *vaddr, bool readonly)
> +/*
> + * The caller must set asid = 0 if the device does not support asid.
> + * This is not an ABI break since it is set to 0 by the initializer anyway.
> + */
> +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> + hwaddr size, void *vaddr, bool readonly)
> {
> struct vhost_msg_v2 msg = {};
> int fd = v->device_fd;
> int ret = 0;
>
> msg.type = v->msg_type;
> + msg.asid = asid;
> msg.iotlb.iova = iova;
> msg.iotlb.size = size;
> msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
> msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
> msg.iotlb.type = VHOST_IOTLB_UPDATE;
>
> - trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
> - msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
> + trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
> + msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
> + msg.iotlb.type);
>
> if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> error_report("failed to write, fd=%d, errno=%d (%s)",
> @@ -98,18 +104,24 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> return ret;
> }
>
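
The "not an ABI break" comments hold because the asid field takes the place of
what used to be a reserved word in the v2 IOTLB message, so a zero-initialized
message is bit-identical to what QEMU sent before. Roughly, from the vhost
uapi headers (trimmed here for reference only):

    struct vhost_msg_v2 {
        __u32 type;
        __u32 asid;   /* formerly reserved; left at 0 when the device does
                       * not offer VHOST_BACKEND_F_IOTLB_ASID */
        union {
            struct vhost_iotlb_msg iotlb;
            /* ... */
        };
    };

Since both helpers start from "struct vhost_msg_v2 msg = {}", passing asid = 0
keeps the message unchanged for devices without ASID support.
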
> -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
> +/*
> + * The caller must set asid = 0 if the device does not support asid.
> + * This is not an ABI break since it is set to 0 by the initializer anyway.
> + */
> +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
> + hwaddr size)
> {
> struct vhost_msg_v2 msg = {};
> int fd = v->device_fd;
> int ret = 0;
>
> msg.type = v->msg_type;
> + msg.asid = asid;
> msg.iotlb.iova = iova;
> msg.iotlb.size = size;
> msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
>
> - trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
> + trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
> msg.iotlb.size, msg.iotlb.type);
>
> if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> @@ -229,8 +241,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> }
>
> vhost_vdpa_iotlb_batch_begin_once(v);
> - ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> - vaddr, section->readonly);
> + ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
> + int128_get64(llsize), vaddr, section->readonly);
> if (ret) {
> error_report("vhost vdpa map fail!");
> goto fail_map;
> @@ -303,7 +315,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> vhost_iova_tree_remove(v->iova_tree, *result);
> }
> vhost_vdpa_iotlb_batch_begin_once(v);
> - ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
> + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
> + int128_get64(llsize));
> if (ret) {
> error_report("vhost_vdpa dma unmap error!");
> }
> @@ -884,7 +897,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
> }
>
> size = ROUND_UP(result->size, qemu_real_host_page_size());
> - r = vhost_vdpa_dma_unmap(v, result->iova, size);
> + r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
> if (unlikely(r < 0)) {
> error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r),
> -r);
> return;
> @@ -924,7 +937,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
> return false;
> }
>
> - r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
> + r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
> + needle->size + 1,
> (void *)(uintptr_t)needle->translated_addr,
> needle->perm == IOMMU_RO);
> if (unlikely(r != 0)) {
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index dd9cea42d0..89b01fcaec 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -258,7 +258,7 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> return;
> }
>
> - r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
> + r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
> if (unlikely(r != 0)) {
> error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> }
> @@ -298,8 +298,8 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
> return r;
> }
>
> - r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
> - !write);
> + r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
> + vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
> if (unlikely(r < 0)) {
> goto dma_map_err;
> }
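
On the net side both CVQ helpers now go through v->address_space_id, which this
patch leaves at 0 for every device, so behavior is unchanged for now. Purely as
a hypothetical illustration of where this is heading (the value below is made
up, not something this patch sets), a CVQ that gets its own address space would
only need its vhost_vdpa tagged with a different ID:

    /* hypothetical: give the CVQ's vhost_vdpa a dedicated ASID so that
     * vhost_vdpa_cvq_map_buf()/vhost_vdpa_cvq_unmap_buf() land in a
     * separate IOTLB from the guest's passthrough mappings */
    s->vhost_vdpa.address_space_id = 1;

while the data virtqueues keep VHOST_VDPA_GUEST_PA_ASID.
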
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 820dadc26c..0ad9390307 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -30,8 +30,8 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
> vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
>
> # vhost-vdpa.c
> -vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
> -vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
> +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
> --
> 2.31.1
>