author     Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com>   2024-01-19 13:41:55 -0700
committer  Mark Hatle <mark.hatle@amd.com>                           2024-01-22 16:31:41 -0700
commit     4eccda49693a2c75f906dffda47abbeb4197d12e (patch)
tree       16f583c98d89590757918584886d4a0dd6373df7 /meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
parent     d925a82f3631bd20648a91bc23d5746e457c9b8b (diff)
download   meta-xilinx-4eccda49693a2c75f906dffda47abbeb4197d12e.tar.gz
qemu: Rename qemu-xen to qemu-xen_7.1
Rename the qemu-xen inclusion file to qemu-xen_7.1 and update the
reference in the qemu-xilinx_2023 and qemu_7.1 recipes.
Signed-off-by: Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com>
Signed-off-by: Mark Hatle <mark.hatle@amd.com>
Diffstat (limited to 'meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch')
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch  150
1 file changed, 150 insertions, 0 deletions
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
new file mode 100644
index 00000000..db6d8fe5
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
@@ -0,0 +1,150 @@
From bd32a130ca633eae7cf0f4ff0fa856004d413df0 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 27 May 2021 15:27:55 +0200
Subject: [PATCH 5/8] memory: add MemoryRegion map and unmap callbacks

In order to support mapping and unmapping guest memory dynamically to
and from qemu during address_space_[un]map() operations add the map()
and unmap() callbacks to MemoryRegionOps.

Those will be used e.g. for Xen grant mappings when performing guest
I/Os.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
---
 include/exec/memory.h | 19 +++++++++++++++++
 softmmu/physmem.c     | 47 +++++++++++++++++++++++++++++++++----------
 2 files changed, 55 insertions(+), 11 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index bfb1de8eea..19e2aac694 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -245,6 +245,25 @@ struct MemoryRegionOps {
                                     unsigned size,
                                     MemTxAttrs attrs);

+    /* Dynamically create mapping. @addr is the guest address to map; @plen
+     * is the pointer to the usable length of the buffer.
+     * @mr contents can be changed in case a new memory region is created for
+     * the mapping.
+     * Returns the buffer address for accessing the data. */
+    void *(*map)(MemoryRegion **mr,
+                 hwaddr addr,
+                 hwaddr *plen,
+                 bool is_write,
+                 MemTxAttrs attrs);
+
+    /* Unmap an area obtained via map() before. */
+    void (*unmap)(MemoryRegion *mr,
+                  void *buffer,
+                  ram_addr_t addr,
+                  hwaddr len,
+                  bool is_write,
+                  hwaddr access_len);
+
     enum device_endian endianness;
     /* Guest-visible constraints: */
     struct {
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 439a53a1be..2038240311 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3237,7 +3237,7 @@ void *address_space_map(AddressSpace *as,
     hwaddr len = *plen;
     hwaddr l, xlat;
     MemoryRegion *mr;
-    void *ptr;
+    void *ptr = NULL;
     FlatView *fv;

     if (len == 0) {
@@ -3273,10 +3273,17 @@ void *address_space_map(AddressSpace *as,


     memory_region_ref(mr);
+
+    if (mr->ops && mr->ops->map) {
+        ptr = mr->ops->map(&mr, addr, plen, is_write, attrs);
+    }
+
     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                         l, is_write, attrs);
     fuzz_dma_read_cb(addr, *plen, mr);
-    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    if (ptr == NULL) {
+        ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    }

     return ptr;
 }
@@ -3294,11 +3301,16 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,

     mr = memory_region_from_host(buffer, &addr1);
     assert(mr != NULL);
-    if (is_write) {
-        invalidate_and_set_dirty(mr, addr1, access_len);
-    }
-    if (xen_enabled()) {
-        xen_invalidate_map_cache_entry(buffer);
+
+    if (mr->ops && mr->ops->unmap) {
+        mr->ops->unmap(mr, buffer, addr1, len, is_write, access_len);
+    } else {
+        if (is_write) {
+            invalidate_and_set_dirty(mr, addr1, access_len);
+        }
+        if (xen_enabled()) {
+            xen_invalidate_map_cache_entry(buffer);
+        }
     }
     memory_region_unref(mr);
     return;
@@ -3370,10 +3382,17 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
          * doing this if we found actual RAM, which behaves the same
          * regardless of attributes; so UNSPECIFIED is fine.
          */
+        if (mr->ops && mr->ops->map) {
+            cache->ptr = mr->ops->map(&mr, addr, &l, is_write,
+                                      MEMTXATTRS_UNSPECIFIED);
+        }
+
         l = flatview_extend_translation(cache->fv, addr, len, mr,
                                         cache->xlat, l, is_write,
                                         MEMTXATTRS_UNSPECIFIED);
-        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
+        if (!cache->ptr) {
+            cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
+        }
     } else {
         cache->ptr = NULL;
     }
@@ -3395,14 +3414,20 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,

 void address_space_cache_destroy(MemoryRegionCache *cache)
 {
-    if (!cache->mrs.mr) {
+    MemoryRegion *mr = cache->mrs.mr;
+
+    if (!mr) {
         return;
     }

-    if (xen_enabled()) {
+    if (mr->ops && mr->ops->unmap) {
+        mr->ops->unmap(mr, cache->ptr, cache->xlat, cache->len,
+                       cache->is_write, cache->len);
+    } else if (xen_enabled()) {
         xen_invalidate_map_cache_entry(cache->ptr);
     }
-    memory_region_unref(cache->mrs.mr);
+
+    memory_region_unref(mr);
     flatview_unref(cache->fv);
     cache->mrs.mr = NULL;
     cache->fv = NULL;
--
2.25.1