Diffstat (limited to 'meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch')
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch | 180
1 file changed, 0 insertions, 180 deletions
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch
deleted file mode 100644
index 4337e0c8..00000000
--- a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch
+++ /dev/null
@@ -1,180 +0,0 @@
From 2a01fa06d267f68148d3a6df50675edfe090601a Mon Sep 17 00:00:00 2001
From: Stefano Stabellini <stefano.stabellini@amd.com>
Date: Fri, 1 Jul 2022 18:16:52 -0700
Subject: [PATCH 04/16] xen-hvm: move x86-specific fields out of XenIOState

Move:
- shared_vmport_page
- log_for_dirtybit
- dirty_bitmap
- suspend
- wakeup

out of XenIOState as they are only used on x86, especially the ones
related to dirty logging.

Remove free_phys_offset that was unused.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
---
 hw/i386/xen/xen-hvm.c | 58 ++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 31 deletions(-)

diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index b27484ad22..225cfdf8b7 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -73,6 +73,7 @@ struct shared_vmport_iopage {
 };
 typedef struct shared_vmport_iopage shared_vmport_iopage_t;
 #endif
+static shared_vmport_iopage_t *shared_vmport_page;

 static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
 {
@@ -95,6 +96,11 @@ typedef struct XenPhysmap {
 } XenPhysmap;

 static QLIST_HEAD(, XenPhysmap) xen_physmap;
+static const XenPhysmap *log_for_dirtybit = NULL;
+/* Buffer used by xen_sync_dirty_bitmap */
+static unsigned long *dirty_bitmap = NULL;
+static Notifier suspend;
+static Notifier wakeup;

 typedef struct XenPciDevice {
     PCIDevice *pci_dev;
@@ -105,7 +111,6 @@ typedef struct XenPciDevice {
 typedef struct XenIOState {
     ioservid_t ioservid;
     shared_iopage_t *shared_page;
-    shared_vmport_iopage_t *shared_vmport_page;
     buffered_iopage_t *buffered_io_page;
     xenforeignmemory_resource_handle *fres;
     QEMUTimer *buffered_io_timer;
@@ -125,14 +130,8 @@ typedef struct XenIOState {
     MemoryListener io_listener;
     QLIST_HEAD(, XenPciDevice) dev_list;
     DeviceListener device_listener;
-    hwaddr free_phys_offset;
-    const XenPhysmap *log_for_dirtybit;
-    /* Buffer used by xen_sync_dirty_bitmap */
-    unsigned long *dirty_bitmap;

     Notifier exit;
-    Notifier suspend;
-    Notifier wakeup;
 } XenIOState;

 /* Xen specific function for piix pci */
@@ -462,10 +461,10 @@ static int xen_remove_from_physmap(XenIOState *state,
     }

     QLIST_REMOVE(physmap, list);
-    if (state->log_for_dirtybit == physmap) {
-        state->log_for_dirtybit = NULL;
-        g_free(state->dirty_bitmap);
-        state->dirty_bitmap = NULL;
+    if (log_for_dirtybit == physmap) {
+        log_for_dirtybit = NULL;
+        g_free(dirty_bitmap);
+        dirty_bitmap = NULL;
     }
     g_free(physmap);

@@ -626,16 +625,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
         return;
     }

-    if (state->log_for_dirtybit == NULL) {
-        state->log_for_dirtybit = physmap;
-        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
-    } else if (state->log_for_dirtybit != physmap) {
+    if (log_for_dirtybit == NULL) {
+        log_for_dirtybit = physmap;
+        dirty_bitmap = g_new(unsigned long, bitmap_size);
+    } else if (log_for_dirtybit != physmap) {
         /* Only one range for dirty bitmap can be tracked. */
         return;
     }

     rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
-                              npages, state->dirty_bitmap);
+                              npages, dirty_bitmap);
     if (rc < 0) {
 #ifndef ENODATA
 #define ENODATA ENOENT
@@ -650,7 +649,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
     }

     for (i = 0; i < bitmap_size; i++) {
-        unsigned long map = state->dirty_bitmap[i];
+        unsigned long map = dirty_bitmap[i];
         while (map != 0) {
             j = ctzl(map);
             map &= ~(1ul << j);
@@ -676,12 +675,10 @@ static void xen_log_start(MemoryListener *listener,
 static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                          int old, int new)
 {
-    XenIOState *state = container_of(listener, XenIOState, memory_listener);
-
     if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
-        state->log_for_dirtybit = NULL;
-        g_free(state->dirty_bitmap);
-        state->dirty_bitmap = NULL;
+        log_for_dirtybit = NULL;
+        g_free(dirty_bitmap);
+        dirty_bitmap = NULL;
         /* Disable dirty bit tracking */
         xen_track_dirty_vram(xen_domid, 0, 0, NULL);
     }
@@ -1021,9 +1018,9 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
 {
     vmware_regs_t *vmport_regs;

-    assert(state->shared_vmport_page);
+    assert(shared_vmport_page);
     vmport_regs =
-        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
+        &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
     QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

     current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
@@ -1468,7 +1465,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)

     state->memory_listener = xen_memory_listener;
     memory_listener_register(&state->memory_listener, &address_space_memory);
-    state->log_for_dirtybit = NULL;

     state->io_listener = xen_io_listener;
     memory_listener_register(&state->io_listener, &address_space_io);
@@ -1489,19 +1485,19 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
     QLIST_INIT(&xen_physmap);
     xen_read_physmap(state);

-    state->suspend.notify = xen_suspend_notifier;
-    qemu_register_suspend_notifier(&state->suspend);
+    suspend.notify = xen_suspend_notifier;
+    qemu_register_suspend_notifier(&suspend);

-    state->wakeup.notify = xen_wakeup_notifier;
-    qemu_register_wakeup_notifier(&state->wakeup);
+    wakeup.notify = xen_wakeup_notifier;
+    qemu_register_wakeup_notifier(&wakeup);

     rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
     if (!rc) {
         DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
-        state->shared_vmport_page =
+        shared_vmport_page =
             xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                  1, &ioreq_pfn, NULL);
-        if (state->shared_vmport_page == NULL) {
+        if (shared_vmport_page == NULL) {
             error_report("map shared vmport IO page returned error %d handle=%p",
                          errno, xen_xc);
             goto err;
--
2.17.1

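For readers outside QEMU, the deleted patch illustrates a common C refactoring: state that exists once per process and only matters for one architecture is hoisted out of the per-instance struct into file-scope statics. The sketch below is illustrative only; the names (io_state_before, io_state_after, sync_dirty_bitmap) are hypothetical and are not part of QEMU, Xen, or the patch above.

/*
 * Minimal sketch of the pattern, under the assumption that the buffer is
 * per-process, so a file-scope static suffices and callers no longer need
 * to carry it through the state struct. Hypothetical names throughout.
 */
#include <stdlib.h>

/* Before: the arch-specific buffer travels with every instance. */
struct io_state_before {
    int ioservid;
    unsigned long *dirty_bitmap;   /* only used on one architecture */
};

/* After: the struct keeps only the common fields... */
struct io_state_after {
    int ioservid;
};

/* ...and the single-instance buffer becomes a static in this file. */
static unsigned long *dirty_bitmap;

static void sync_dirty_bitmap(size_t bitmap_size)
{
    if (dirty_bitmap == NULL) {
        /* Allocate lazily, analogous to the g_new() call in the patch. */
        dirty_bitmap = calloc(bitmap_size, sizeof(*dirty_bitmap));
    }
    /* ... fill and consume dirty_bitmap here ... */
}

The trade-off is the usual one for statics: helpers in the same translation unit get simpler signatures, at the cost of limiting the design to a single instance, which is exactly the situation the commit message describes.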