author    | Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com> | 2024-01-19 13:41:59 -0700
committer | Mark Hatle <mark.hatle@amd.com> | 2024-01-22 16:31:41 -0700
commit    | c192fca9f62b732b5abde4c3633b13e839d654dc (patch)
tree      | 695ca142836556ee1ce40e70c18edd4d3352925f /meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
parent    | d1d32d3b7b052823dda679e11749678bf6eb9fab (diff)
download  | meta-xilinx-c192fca9f62b732b5abde4c3633b13e839d654dc.tar.gz
qemu: Add Xen 4.18 patches
Add Xen 4.18 patches and limit patches to target recipes only.
Signed-off-by: Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com>
Signed-off-by: Mark Hatle <mark.hatle@amd.com>
Diffstat (limited to 'meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch')
-rw-r--r-- | meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch | 155
1 file changed, 155 insertions, 0 deletions
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
new file mode 100644
index 00000000..fa18ef16
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-8.1/0009-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
@@ -0,0 +1,155 @@
From 364a11be6274336ec9b0f06f3272f964d27c9349 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 27 May 2021 15:27:55 +0200
Subject: [PATCH 09/11] memory: add MemoryRegion map and unmap callbacks

In order to support mapping and unmapping guest memory dynamically to
and from qemu during address_space_[un]map() operations add the map()
and unmap() callbacks to MemoryRegionOps.

Those will be used e.g. for Xen grant mappings when performing guest
I/Os.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
---
 include/exec/memory.h | 21 ++++++++++++++++++
 softmmu/physmem.c     | 50 +++++++++++++++++++++++++++++++++----------
 2 files changed, 60 insertions(+), 11 deletions(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index 68284428f8..55414417ab 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -274,6 +274,27 @@ struct MemoryRegionOps {
                                     unsigned size,
                                     MemTxAttrs attrs);
 
+    /*
+     * Dynamically create mapping. @addr is the guest address to map; @plen
+     * is the pointer to the usable length of the buffer.
+     * @mr contents can be changed in case a new memory region is created for
+     * the mapping.
+     * Returns the buffer address for accessing the data.
+     */
+    void *(*map)(MemoryRegion **mr,
+                 hwaddr addr,
+                 hwaddr *plen,
+                 bool is_write,
+                 MemTxAttrs attrs);
+
+    /* Unmap an area obtained via map() before. */
+    void (*unmap)(MemoryRegion *mr,
+                  void *buffer,
+                  ram_addr_t addr,
+                  hwaddr len,
+                  bool is_write,
+                  hwaddr access_len);
+
     enum device_endian endianness;
     /* Guest-visible constraints: */
     struct {
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index a934e44fe7..a1e2030424 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3070,6 +3070,7 @@ void *address_space_map(AddressSpace *as,
     hwaddr len = *plen;
     hwaddr l, xlat;
     MemoryRegion *mr;
+    void *ptr = NULL;
     FlatView *fv;
 
     if (len == 0) {
@@ -3103,12 +3104,20 @@ void *address_space_map(AddressSpace *as,
         return bounce.buffer;
     }
 
-
     memory_region_ref(mr);
+
+    if (mr->ops && mr->ops->map) {
+        ptr = mr->ops->map(&mr, addr, plen, is_write, attrs);
+    }
+
     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                         l, is_write, attrs);
     fuzz_dma_read_cb(addr, *plen, mr);
-    return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    if (ptr == NULL) {
+        ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    }
+
+    return ptr;
 }
 
 /* Unmaps a memory region previously mapped by address_space_map().
@@ -3124,11 +3133,16 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
 
         mr = memory_region_from_host(buffer, &addr1);
         assert(mr != NULL);
-        if (is_write) {
-            invalidate_and_set_dirty(mr, addr1, access_len);
-        }
-        if (xen_enabled()) {
-            xen_invalidate_map_cache_entry(buffer);
+
+        if (mr->ops && mr->ops->unmap) {
+            mr->ops->unmap(mr, buffer, addr1, len, is_write, access_len);
+        } else {
+            if (is_write) {
+                invalidate_and_set_dirty(mr, addr1, access_len);
+            }
+            if (xen_enabled()) {
+                xen_invalidate_map_cache_entry(buffer);
+            }
         }
         memory_region_unref(mr);
         return;
@@ -3201,10 +3215,18 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
          * doing this if we found actual RAM, which behaves the same
          * regardless of attributes; so UNSPECIFIED is fine.
          */
+        if (mr->ops && mr->ops->map) {
+            cache->ptr = mr->ops->map(&mr, addr, &l, is_write,
+                                      MEMTXATTRS_UNSPECIFIED);
+        }
+
         l = flatview_extend_translation(cache->fv, addr, len, mr,
                                         cache->xlat, l, is_write,
                                         MEMTXATTRS_UNSPECIFIED);
-        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
+        if (!cache->ptr) {
+            cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l,
+                                             true);
+        }
     } else {
         cache->ptr = NULL;
     }
@@ -3226,14 +3248,20 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,
 
 void address_space_cache_destroy(MemoryRegionCache *cache)
 {
-    if (!cache->mrs.mr) {
+    MemoryRegion *mr = cache->mrs.mr;
+
+    if (!mr) {
         return;
     }
 
-    if (xen_enabled()) {
+    if (mr->ops && mr->ops->unmap) {
+        mr->ops->unmap(mr, cache->ptr, cache->xlat, cache->len,
+                       cache->is_write, cache->len);
+    } else if (xen_enabled()) {
         xen_invalidate_map_cache_entry(cache->ptr);
    }
-    memory_region_unref(cache->mrs.mr);
+
+    memory_region_unref(mr);
     flatview_unref(cache->fv);
     cache->mrs.mr = NULL;
     cache->fv = NULL;
-- 
2.39.2

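For context, the patch above only adds the callback slots and the NULL fallback; a memory region backend still has to implement them. The sketch below is illustrative only and is not part of this patch or of upstream QEMU: the my_backend_* helpers and the my_region_ops name are hypothetical placeholders standing in for a real mapping backend such as the Xen grant-table code added elsewhere in this series.

/*
 * Hypothetical sketch: a MemoryRegionOps that fills in the new
 * map()/unmap() hooks introduced by this patch.
 */
#include "qemu/osdep.h"
#include "exec/memory.h"

static void *my_backend_map(hwaddr addr, hwaddr *plen, bool is_write)
{
    /*
     * A real backend would establish a host mapping for the guest range
     * [addr, addr + *plen) here, shrink *plen if only part of the range
     * could be mapped, and return the host address of the mapping.
     */
    return NULL; /* NULL makes address_space_map() fall back to guest RAM. */
}

static void my_backend_unmap(void *buffer, hwaddr len)
{
    /* A real backend would tear down the mapping created above. */
}

static void *my_region_map(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
                           bool is_write, MemTxAttrs attrs)
{
    /* *mr may be replaced if the mapping lives in a different region. */
    return my_backend_map(addr, plen, is_write);
}

static void my_region_unmap(MemoryRegion *mr, void *buffer, ram_addr_t addr,
                            hwaddr len, bool is_write, hwaddr access_len)
{
    my_backend_unmap(buffer, len);
}

static const MemoryRegionOps my_region_ops = {
    /* Regular .read/.write MMIO handlers would go here as usual. */
    .endianness = DEVICE_NATIVE_ENDIAN,
    .map        = my_region_map,
    .unmap      = my_region_unmap,
};

Because address_space_map() and address_space_cache_init() fall back to qemu_ram_ptr_length() whenever map() is absent or returns NULL, regions that do not implement the callbacks keep their existing behavior.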