path: root/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch
Diffstat (limited to 'meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch')
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch  2094
1 file changed, 0 insertions, 2094 deletions
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch
deleted file mode 100644
index 7df302a2..00000000
--- a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-7.1/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch
+++ /dev/null
@@ -1,2094 +0,0 @@
1From 87d362e72e65b604da7554657204344a6540d88c Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 15:59:47 -0700
4Subject: [PATCH 06/16] xen-hvm: move common functions to
5 hw/xen/xen-hvm-common.c
6
7Extract common functionality from xen-hvm.c and move it to
8hw/xen/xen-hvm-common.c. These common functions are useful for creating
9an IOREQ server.
10
11Move the reusable IOREQ-server creation code into a new function,
12xen_register_ioreq(), which can be used by both x86 and ARM machines.
13
14NOTE: This patch will break the build, as it only moves functions.
15Build fixes will be in the next patch.
16
17Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
18Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
19Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
20---
21 hw/i386/xen/trace-events | 14 -
22 hw/i386/xen/xen-hvm.c | 927 +-------------------------------
23 hw/xen/meson.build | 5 +-
24 hw/xen/trace-events | 14 +
25 hw/xen/xen-hvm-common.c | 861 +++++++++++++++++++++++++++++
26 include/hw/i386/xen_arch_hvm.h | 1 +
27 include/hw/xen/xen-hvm-common.h | 98 ++++
28 7 files changed, 986 insertions(+), 934 deletions(-)
29 create mode 100644 hw/xen/xen-hvm-common.c
30 create mode 100644 include/hw/xen/xen-hvm-common.h
31
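For illustration only (not part of the patch): a minimal sketch of how an
arch-specific machine-init path could consume the new helper once this
series is applied. The call shape mirrors the x86 hookup below; the
my_arch_* names are hypothetical, and xen_region_add()/xen_region_del()
are the helpers this patch makes non-static in hw/xen/xen-hvm-common.c.

    #include "qemu/osdep.h"
    #include "hw/boards.h"
    #include "hw/xen/xen-hvm-common.h"

    /* Hypothetical per-arch listener; the callbacks are the common
     * helpers exported by xen-hvm-common.c in this patch. */
    static MemoryListener my_arch_memory_listener = {
        .name = "my-arch-xen-memory",
        .region_add = xen_region_add,
        .region_del = xen_region_del,
        .priority = 10,
    };

    static void my_arch_xen_hvm_init(MachineState *ms)
    {
        XenIOState *state = g_new0(XenIOState, 1);

        /* Opens the event channel and xenstore handles, creates and maps
         * the IOREQ server, binds the per-vcpu and buffered-io event
         * channels, and registers the listeners -- the work the removed
         * x86-only block below used to do inline. */
        xen_register_ioreq(state, ms->smp.max_cpus, my_arch_memory_listener);
    }

Passing the MemoryListener by value lets each architecture supply its own
callbacks (x86 keeps its dirty-logging hooks) while sharing the IOREQ
registration plumbing.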
32diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
33index a0c89d91c4..5d0a8d6dcf 100644
34--- a/hw/i386/xen/trace-events
35+++ b/hw/i386/xen/trace-events
36@@ -7,17 +7,3 @@ xen_platform_log(char *s) "xen platform: %s"
37 xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")"
38 xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")"
39
40-# xen-hvm.c
41-xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx"
42-xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i"
43-handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
44-handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
45-handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
46-cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
47-cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
48-cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
49-cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
50-xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
51-cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
52-cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
53-
54diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
55index 178f0c68fc..36d87555a9 100644
56--- a/hw/i386/xen/xen-hvm.c
57+++ b/hw/i386/xen/xen-hvm.c
58@@ -10,43 +10,21 @@
59
60 #include "qemu/osdep.h"
61 #include "qemu/units.h"
62+#include "qapi/error.h"
63+#include "qapi/qapi-commands-migration.h"
64+#include "trace.h"
65
66-#include "cpu.h"
67-#include "hw/pci/pci.h"
68-#include "hw/pci/pci_host.h"
69 #include "hw/i386/pc.h"
70 #include "hw/irq.h"
71-#include "hw/hw.h"
72 #include "hw/i386/apic-msidef.h"
73-#include "hw/xen/xen_common.h"
74-#include "hw/xen/xen-legacy-backend.h"
75-#include "hw/xen/xen-bus.h"
76 #include "hw/xen/xen-x86.h"
77-#include "qapi/error.h"
78-#include "qapi/qapi-commands-migration.h"
79-#include "qemu/error-report.h"
80-#include "qemu/main-loop.h"
81 #include "qemu/range.h"
82-#include "sysemu/runstate.h"
83-#include "sysemu/sysemu.h"
84-#include "sysemu/xen.h"
85-#include "sysemu/xen-mapcache.h"
86-#include "trace.h"
87
88-#include <xen/hvm/ioreq.h>
89+#include "hw/xen/xen-hvm-common.h"
90+#include "hw/xen/arch_hvm.h"
91 #include <xen/hvm/e820.h>
92
93-//#define DEBUG_XEN_HVM
94-
95-#ifdef DEBUG_XEN_HVM
96-#define DPRINTF(fmt, ...) \
97- do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
98-#else
99-#define DPRINTF(fmt, ...) \
100- do { } while (0)
101-#endif
102-
103-static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
104+static MemoryRegion ram_640k, ram_lo, ram_hi;
105 static MemoryRegion *framebuffer;
106 static bool xen_in_migration;
107
108@@ -75,25 +53,6 @@ typedef struct shared_vmport_iopage shared_vmport_iopage_t;
109 #endif
110 static shared_vmport_iopage_t *shared_vmport_page;
111
112-static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
113-{
114- return shared_page->vcpu_ioreq[i].vp_eport;
115-}
116-static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
117-{
118- return &shared_page->vcpu_ioreq[vcpu];
119-}
120-
121-#define BUFFER_IO_MAX_DELAY 100
122-
123-typedef struct XenPhysmap {
124- hwaddr start_addr;
125- ram_addr_t size;
126- const char *name;
127- hwaddr phys_offset;
128-
129- QLIST_ENTRY(XenPhysmap) list;
130-} XenPhysmap;
131
132 static QLIST_HEAD(, XenPhysmap) xen_physmap;
133 static const XenPhysmap *log_for_dirtybit = NULL;
134@@ -102,40 +61,6 @@ static unsigned long *dirty_bitmap = NULL;
135 static Notifier suspend;
136 static Notifier wakeup;
137
138-typedef struct XenPciDevice {
139- PCIDevice *pci_dev;
140- uint32_t sbdf;
141- QLIST_ENTRY(XenPciDevice) entry;
142-} XenPciDevice;
143-
144-typedef struct XenIOState {
145- ioservid_t ioservid;
146- shared_iopage_t *shared_page;
147- buffered_iopage_t *buffered_io_page;
148- xenforeignmemory_resource_handle *fres;
149- QEMUTimer *buffered_io_timer;
150- CPUState **cpu_by_vcpu_id;
151- /* the evtchn port for polling the notification, */
152- evtchn_port_t *ioreq_local_port;
153- /* evtchn remote and local ports for buffered io */
154- evtchn_port_t bufioreq_remote_port;
155- evtchn_port_t bufioreq_local_port;
156- /* the evtchn fd for polling */
157- xenevtchn_handle *xce_handle;
158- /* which vcpu we are serving */
159- int send_vcpu;
160-
161- struct xs_handle *xenstore;
162- MemoryListener memory_listener;
163- MemoryListener io_listener;
164- QLIST_HEAD(, XenPciDevice) dev_list;
165- DeviceListener device_listener;
166-
167- Notifier exit;
168-} XenIOState;
169-
170-#include "hw/xen/arch_hvm.h"
171-
172 /* Xen specific function for piix pci */
173
174 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
175@@ -248,42 +173,6 @@ static void xen_ram_init(PCMachineState *pcms,
176 }
177 }
178
179-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
180- Error **errp)
181-{
182- unsigned long nr_pfn;
183- xen_pfn_t *pfn_list;
184- int i;
185-
186- if (runstate_check(RUN_STATE_INMIGRATE)) {
187- /* RAM already populated in Xen */
188- fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
189- " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
190- __func__, size, ram_addr);
191- return;
192- }
193-
194- if (mr == &ram_memory) {
195- return;
196- }
197-
198- trace_xen_ram_alloc(ram_addr, size);
199-
200- nr_pfn = size >> TARGET_PAGE_BITS;
201- pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);
202-
203- for (i = 0; i < nr_pfn; i++) {
204- pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
205- }
206-
207- if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
208- error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
209- ram_addr);
210- }
211-
212- g_free(pfn_list);
213-}
214-
215 static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
216 {
217 XenPhysmap *physmap = NULL;
218@@ -473,109 +362,6 @@ static int xen_remove_from_physmap(XenIOState *state,
219 return 0;
220 }
221
222-static void xen_set_memory(struct MemoryListener *listener,
223- MemoryRegionSection *section,
224- bool add)
225-{
226- XenIOState *state = container_of(listener, XenIOState, memory_listener);
227-
228- if (section->mr == &ram_memory) {
229- return;
230- } else {
231- if (add) {
232- xen_map_memory_section(xen_domid, state->ioservid,
233- section);
234- } else {
235- xen_unmap_memory_section(xen_domid, state->ioservid,
236- section);
237- }
238- }
239- arch_xen_set_memory(state, section, add);
240-}
241-
242-static void xen_region_add(MemoryListener *listener,
243- MemoryRegionSection *section)
244-{
245- memory_region_ref(section->mr);
246- xen_set_memory(listener, section, true);
247-}
248-
249-static void xen_region_del(MemoryListener *listener,
250- MemoryRegionSection *section)
251-{
252- xen_set_memory(listener, section, false);
253- memory_region_unref(section->mr);
254-}
255-
256-static void xen_io_add(MemoryListener *listener,
257- MemoryRegionSection *section)
258-{
259- XenIOState *state = container_of(listener, XenIOState, io_listener);
260- MemoryRegion *mr = section->mr;
261-
262- if (mr->ops == &unassigned_io_ops) {
263- return;
264- }
265-
266- memory_region_ref(mr);
267-
268- xen_map_io_section(xen_domid, state->ioservid, section);
269-}
270-
271-static void xen_io_del(MemoryListener *listener,
272- MemoryRegionSection *section)
273-{
274- XenIOState *state = container_of(listener, XenIOState, io_listener);
275- MemoryRegion *mr = section->mr;
276-
277- if (mr->ops == &unassigned_io_ops) {
278- return;
279- }
280-
281- xen_unmap_io_section(xen_domid, state->ioservid, section);
282-
283- memory_region_unref(mr);
284-}
285-
286-static void xen_device_realize(DeviceListener *listener,
287- DeviceState *dev)
288-{
289- XenIOState *state = container_of(listener, XenIOState, device_listener);
290-
291- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
292- PCIDevice *pci_dev = PCI_DEVICE(dev);
293- XenPciDevice *xendev = g_new(XenPciDevice, 1);
294-
295- xendev->pci_dev = pci_dev;
296- xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
297- pci_dev->devfn);
298- QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
299-
300- xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
301- }
302-}
303-
304-static void xen_device_unrealize(DeviceListener *listener,
305- DeviceState *dev)
306-{
307- XenIOState *state = container_of(listener, XenIOState, device_listener);
308-
309- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
310- PCIDevice *pci_dev = PCI_DEVICE(dev);
311- XenPciDevice *xendev, *next;
312-
313- xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
314-
315- QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
316- if (xendev->pci_dev == pci_dev) {
317- QLIST_REMOVE(xendev, entry);
318- g_free(xendev);
319- break;
320- }
321- }
322- }
323-}
324-
325 static void xen_sync_dirty_bitmap(XenIOState *state,
326 hwaddr start_addr,
327 ram_addr_t size)
328@@ -683,277 +469,6 @@ static MemoryListener xen_memory_listener = {
329 .priority = 10,
330 };
331
332-static MemoryListener xen_io_listener = {
333- .name = "xen-io",
334- .region_add = xen_io_add,
335- .region_del = xen_io_del,
336- .priority = 10,
337-};
338-
339-static DeviceListener xen_device_listener = {
340- .realize = xen_device_realize,
341- .unrealize = xen_device_unrealize,
342-};
343-
344-/* get the ioreq packets from shared memory */
345-static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
346-{
347- ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
348-
349- if (req->state != STATE_IOREQ_READY) {
350- DPRINTF("I/O request not ready: "
351- "%x, ptr: %x, port: %"PRIx64", "
352- "data: %"PRIx64", count: %u, size: %u\n",
353- req->state, req->data_is_ptr, req->addr,
354- req->data, req->count, req->size);
355- return NULL;
356- }
357-
358- xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
359-
360- req->state = STATE_IOREQ_INPROCESS;
361- return req;
362-}
363-
364-/* use poll to get the port notification */
365-/* returns the pending ioreq packet taken from shared memory, */
366-/* or NULL when there is nothing to service */
367-static ioreq_t *cpu_get_ioreq(XenIOState *state)
368-{
369- MachineState *ms = MACHINE(qdev_get_machine());
370- unsigned int max_cpus = ms->smp.max_cpus;
371- int i;
372- evtchn_port_t port;
373-
374- port = xenevtchn_pending(state->xce_handle);
375- if (port == state->bufioreq_local_port) {
376- timer_mod(state->buffered_io_timer,
377- BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
378- return NULL;
379- }
380-
381- if (port != -1) {
382- for (i = 0; i < max_cpus; i++) {
383- if (state->ioreq_local_port[i] == port) {
384- break;
385- }
386- }
387-
388- if (i == max_cpus) {
389- hw_error("Fatal error while trying to get io event!\n");
390- }
391-
392- /* unmask the wanted port again */
393- xenevtchn_unmask(state->xce_handle, port);
394-
395- /* get the io packet from shared memory */
396- state->send_vcpu = i;
397- return cpu_get_ioreq_from_shared_memory(state, i);
398- }
399-
400- /* read error or read nothing */
401- return NULL;
402-}
403-
404-static uint32_t do_inp(uint32_t addr, unsigned long size)
405-{
406- switch (size) {
407- case 1:
408- return cpu_inb(addr);
409- case 2:
410- return cpu_inw(addr);
411- case 4:
412- return cpu_inl(addr);
413- default:
414- hw_error("inp: bad size: %04x %lx", addr, size);
415- }
416-}
417-
418-static void do_outp(uint32_t addr,
419- unsigned long size, uint32_t val)
420-{
421- switch (size) {
422- case 1:
423- return cpu_outb(addr, val);
424- case 2:
425- return cpu_outw(addr, val);
426- case 4:
427- return cpu_outl(addr, val);
428- default:
429- hw_error("outp: bad size: %04x %lx", addr, size);
430- }
431-}
432-
433-/*
434- * Helper functions which read/write an object from/to physical guest
435- * memory, as part of the implementation of an ioreq.
436- *
437- * Equivalent to
438- * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
439- * val, req->size, 0/1)
440- * except without the integer overflow problems.
441- */
442-static void rw_phys_req_item(hwaddr addr,
443- ioreq_t *req, uint32_t i, void *val, int rw)
444-{
445- /* Do everything unsigned so overflow just results in a truncated result
446- * and accesses to undesired parts of guest memory, which is up
447- * to the guest */
448- hwaddr offset = (hwaddr)req->size * i;
449- if (req->df) {
450- addr -= offset;
451- } else {
452- addr += offset;
453- }
454- cpu_physical_memory_rw(addr, val, req->size, rw);
455-}
456-
457-static inline void read_phys_req_item(hwaddr addr,
458- ioreq_t *req, uint32_t i, void *val)
459-{
460- rw_phys_req_item(addr, req, i, val, 0);
461-}
462-static inline void write_phys_req_item(hwaddr addr,
463- ioreq_t *req, uint32_t i, void *val)
464-{
465- rw_phys_req_item(addr, req, i, val, 1);
466-}
467-
468-
469-static void cpu_ioreq_pio(ioreq_t *req)
470-{
471- uint32_t i;
472-
473- trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
474- req->data, req->count, req->size);
475-
476- if (req->size > sizeof(uint32_t)) {
477- hw_error("PIO: bad size (%u)", req->size);
478- }
479-
480- if (req->dir == IOREQ_READ) {
481- if (!req->data_is_ptr) {
482- req->data = do_inp(req->addr, req->size);
483- trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
484- req->size);
485- } else {
486- uint32_t tmp;
487-
488- for (i = 0; i < req->count; i++) {
489- tmp = do_inp(req->addr, req->size);
490- write_phys_req_item(req->data, req, i, &tmp);
491- }
492- }
493- } else if (req->dir == IOREQ_WRITE) {
494- if (!req->data_is_ptr) {
495- trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
496- req->size);
497- do_outp(req->addr, req->size, req->data);
498- } else {
499- for (i = 0; i < req->count; i++) {
500- uint32_t tmp = 0;
501-
502- read_phys_req_item(req->data, req, i, &tmp);
503- do_outp(req->addr, req->size, tmp);
504- }
505- }
506- }
507-}
508-
509-static void cpu_ioreq_move(ioreq_t *req)
510-{
511- uint32_t i;
512-
513- trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
514- req->data, req->count, req->size);
515-
516- if (req->size > sizeof(req->data)) {
517- hw_error("MMIO: bad size (%u)", req->size);
518- }
519-
520- if (!req->data_is_ptr) {
521- if (req->dir == IOREQ_READ) {
522- for (i = 0; i < req->count; i++) {
523- read_phys_req_item(req->addr, req, i, &req->data);
524- }
525- } else if (req->dir == IOREQ_WRITE) {
526- for (i = 0; i < req->count; i++) {
527- write_phys_req_item(req->addr, req, i, &req->data);
528- }
529- }
530- } else {
531- uint64_t tmp;
532-
533- if (req->dir == IOREQ_READ) {
534- for (i = 0; i < req->count; i++) {
535- read_phys_req_item(req->addr, req, i, &tmp);
536- write_phys_req_item(req->data, req, i, &tmp);
537- }
538- } else if (req->dir == IOREQ_WRITE) {
539- for (i = 0; i < req->count; i++) {
540- read_phys_req_item(req->data, req, i, &tmp);
541- write_phys_req_item(req->addr, req, i, &tmp);
542- }
543- }
544- }
545-}
546-
547-static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
548-{
549- uint32_t sbdf = req->addr >> 32;
550- uint32_t reg = req->addr;
551- XenPciDevice *xendev;
552-
553- if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
554- req->size != sizeof(uint32_t)) {
555- hw_error("PCI config access: bad size (%u)", req->size);
556- }
557-
558- if (req->count != 1) {
559- hw_error("PCI config access: bad count (%u)", req->count);
560- }
561-
562- QLIST_FOREACH(xendev, &state->dev_list, entry) {
563- if (xendev->sbdf != sbdf) {
564- continue;
565- }
566-
567- if (!req->data_is_ptr) {
568- if (req->dir == IOREQ_READ) {
569- req->data = pci_host_config_read_common(
570- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
571- req->size);
572- trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
573- req->size, req->data);
574- } else if (req->dir == IOREQ_WRITE) {
575- trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
576- req->size, req->data);
577- pci_host_config_write_common(
578- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
579- req->data, req->size);
580- }
581- } else {
582- uint32_t tmp;
583-
584- if (req->dir == IOREQ_READ) {
585- tmp = pci_host_config_read_common(
586- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
587- req->size);
588- trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
589- req->size, tmp);
590- write_phys_req_item(req->data, req, 0, &tmp);
591- } else if (req->dir == IOREQ_WRITE) {
592- read_phys_req_item(req->data, req, 0, &tmp);
593- trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
594- req->size, tmp);
595- pci_host_config_write_common(
596- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
597- tmp, req->size);
598- }
599- }
600- }
601-}
602-
603 static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
604 {
605 X86CPU *cpu;
606@@ -997,223 +512,6 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
607 current_cpu = NULL;
608 }
609
610-static void handle_ioreq(XenIOState *state, ioreq_t *req)
611-{
612- trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
613- req->addr, req->data, req->count, req->size);
614-
615- if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
616- (req->size < sizeof (target_ulong))) {
617- req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
618- }
619-
620- if (req->dir == IOREQ_WRITE)
621- trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
622- req->addr, req->data, req->count, req->size);
623-
624- switch (req->type) {
625- case IOREQ_TYPE_PIO:
626- cpu_ioreq_pio(req);
627- break;
628- case IOREQ_TYPE_COPY:
629- cpu_ioreq_move(req);
630- break;
631- case IOREQ_TYPE_TIMEOFFSET:
632- break;
633- case IOREQ_TYPE_INVALIDATE:
634- xen_invalidate_map_cache();
635- break;
636- case IOREQ_TYPE_PCI_CONFIG:
637- cpu_ioreq_config(state, req);
638- break;
639- default:
640- arch_handle_ioreq(state, req);
641- }
642- if (req->dir == IOREQ_READ) {
643- trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
644- req->addr, req->data, req->count, req->size);
645- }
646-}
647-
648-static bool handle_buffered_iopage(XenIOState *state)
649-{
650- buffered_iopage_t *buf_page = state->buffered_io_page;
651- buf_ioreq_t *buf_req = NULL;
652- bool handled_ioreq = false;
653- ioreq_t req;
654- int qw;
655-
656- if (!buf_page) {
657- return 0;
658- }
659-
660- memset(&req, 0x00, sizeof(req));
661- req.state = STATE_IOREQ_READY;
662- req.count = 1;
663- req.dir = IOREQ_WRITE;
664-
665- for (;;) {
666- uint32_t rdptr = buf_page->read_pointer, wrptr;
667-
668- xen_rmb();
669- wrptr = buf_page->write_pointer;
670- xen_rmb();
671- if (rdptr != buf_page->read_pointer) {
672- continue;
673- }
674- if (rdptr == wrptr) {
675- break;
676- }
677- buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
678- req.size = 1U << buf_req->size;
679- req.addr = buf_req->addr;
680- req.data = buf_req->data;
681- req.type = buf_req->type;
682- xen_rmb();
683- qw = (req.size == 8);
684- if (qw) {
685- if (rdptr + 1 == wrptr) {
686- hw_error("Incomplete quad word buffered ioreq");
687- }
688- buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
689- IOREQ_BUFFER_SLOT_NUM];
690- req.data |= ((uint64_t)buf_req->data) << 32;
691- xen_rmb();
692- }
693-
694- handle_ioreq(state, &req);
695-
696- /* Only req.data may get updated by handle_ioreq(), albeit even that
697- * should not happen as such data would never make it to the guest (we
698- * can only usefully see writes here after all).
699- */
700- assert(req.state == STATE_IOREQ_READY);
701- assert(req.count == 1);
702- assert(req.dir == IOREQ_WRITE);
703- assert(!req.data_is_ptr);
704-
705- qatomic_add(&buf_page->read_pointer, qw + 1);
706- handled_ioreq = true;
707- }
708-
709- return handled_ioreq;
710-}
711-
712-static void handle_buffered_io(void *opaque)
713-{
714- XenIOState *state = opaque;
715-
716- if (handle_buffered_iopage(state)) {
717- timer_mod(state->buffered_io_timer,
718- BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
719- } else {
720- timer_del(state->buffered_io_timer);
721- xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
722- }
723-}
724-
725-static void cpu_handle_ioreq(void *opaque)
726-{
727- XenIOState *state = opaque;
728- ioreq_t *req = cpu_get_ioreq(state);
729-
730- handle_buffered_iopage(state);
731- if (req) {
732- ioreq_t copy = *req;
733-
734- xen_rmb();
735- handle_ioreq(state, &copy);
736- req->data = copy.data;
737-
738- if (req->state != STATE_IOREQ_INPROCESS) {
739- fprintf(stderr, "Badness in I/O request ... not in service?!: "
740- "%x, ptr: %x, port: %"PRIx64", "
741- "data: %"PRIx64", count: %u, size: %u, type: %u\n",
742- req->state, req->data_is_ptr, req->addr,
743- req->data, req->count, req->size, req->type);
744- destroy_hvm_domain(false);
745- return;
746- }
747-
748- xen_wmb(); /* Update ioreq contents /then/ update state. */
749-
750- /*
751- * We do this before we send the response so that the tools
752- * have the opportunity to pick up on the reset before the
753- * guest resumes and does a hlt with interrupts disabled which
754- * causes Xen to powerdown the domain.
755- */
756- if (runstate_is_running()) {
757- ShutdownCause request;
758-
759- if (qemu_shutdown_requested_get()) {
760- destroy_hvm_domain(false);
761- }
762- request = qemu_reset_requested_get();
763- if (request) {
764- qemu_system_reset(request);
765- destroy_hvm_domain(true);
766- }
767- }
768-
769- req->state = STATE_IORESP_READY;
770- xenevtchn_notify(state->xce_handle,
771- state->ioreq_local_port[state->send_vcpu]);
772- }
773-}
774-
775-static void xen_main_loop_prepare(XenIOState *state)
776-{
777- int evtchn_fd = -1;
778-
779- if (state->xce_handle != NULL) {
780- evtchn_fd = xenevtchn_fd(state->xce_handle);
781- }
782-
783- state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
784- state);
785-
786- if (evtchn_fd != -1) {
787- CPUState *cpu_state;
788-
789- DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
790- CPU_FOREACH(cpu_state) {
791- DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
792- __func__, cpu_state->cpu_index, cpu_state);
793- state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
794- }
795- qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
796- }
797-}
798-
799-
800-static void xen_hvm_change_state_handler(void *opaque, bool running,
801- RunState rstate)
802-{
803- XenIOState *state = opaque;
804-
805- if (running) {
806- xen_main_loop_prepare(state);
807- }
808-
809- xen_set_ioreq_server_state(xen_domid,
810- state->ioservid,
811- (rstate == RUN_STATE_RUNNING));
812-}
813-
814-static void xen_exit_notifier(Notifier *n, void *data)
815-{
816- XenIOState *state = container_of(n, XenIOState, exit);
817-
818- xen_destroy_ioreq_server(xen_domid, state->ioservid);
819- if (state->fres != NULL) {
820- xenforeignmemory_unmap_resource(xen_fmem, state->fres);
821- }
822-
823- xenevtchn_close(state->xce_handle);
824- xs_daemon_close(state->xenstore);
825-}
826-
827 #ifdef XEN_COMPAT_PHYSMAP
828 static void xen_read_physmap(XenIOState *state)
829 {
830@@ -1273,178 +571,17 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
831 xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
832 }
833
834-static int xen_map_ioreq_server(XenIOState *state)
835-{
836- void *addr = NULL;
837- xen_pfn_t ioreq_pfn;
838- xen_pfn_t bufioreq_pfn;
839- evtchn_port_t bufioreq_evtchn;
840- int rc;
841-
842- /*
843- * Attempt to map using the resource API and fall back to normal
844- * foreign mapping if this is not supported.
845- */
846- QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
847- QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
848- state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
849- XENMEM_resource_ioreq_server,
850- state->ioservid, 0, 2,
851- &addr,
852- PROT_READ | PROT_WRITE, 0);
853- if (state->fres != NULL) {
854- trace_xen_map_resource_ioreq(state->ioservid, addr);
855- state->buffered_io_page = addr;
856- state->shared_page = addr + TARGET_PAGE_SIZE;
857- } else if (errno != EOPNOTSUPP) {
858- error_report("failed to map ioreq server resources: error %d handle=%p",
859- errno, xen_xc);
860- return -1;
861- }
862-
863- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
864- (state->shared_page == NULL) ?
865- &ioreq_pfn : NULL,
866- (state->buffered_io_page == NULL) ?
867- &bufioreq_pfn : NULL,
868- &bufioreq_evtchn);
869- if (rc < 0) {
870- error_report("failed to get ioreq server info: error %d handle=%p",
871- errno, xen_xc);
872- return rc;
873- }
874-
875- if (state->shared_page == NULL) {
876- DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
877-
878- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
879- PROT_READ | PROT_WRITE,
880- 1, &ioreq_pfn, NULL);
881- if (state->shared_page == NULL) {
882- error_report("map shared IO page returned error %d handle=%p",
883- errno, xen_xc);
884- }
885- }
886-
887- if (state->buffered_io_page == NULL) {
888- DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
889-
890- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
891- PROT_READ | PROT_WRITE,
892- 1, &bufioreq_pfn,
893- NULL);
894- if (state->buffered_io_page == NULL) {
895- error_report("map buffered IO page returned error %d", errno);
896- return -1;
897- }
898- }
899-
900- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
901- return -1;
902- }
903-
904- DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
905-
906- state->bufioreq_remote_port = bufioreq_evtchn;
907-
908- return 0;
909-}
910-
911 void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
912 {
913 MachineState *ms = MACHINE(pcms);
914 unsigned int max_cpus = ms->smp.max_cpus;
915- int i, rc;
916+ int rc;
917 xen_pfn_t ioreq_pfn;
918 XenIOState *state;
919
920 state = g_new0(XenIOState, 1);
921
922- state->xce_handle = xenevtchn_open(NULL, 0);
923- if (state->xce_handle == NULL) {
924- perror("xen: event channel open");
925- goto err;
926- }
927-
928- state->xenstore = xs_daemon_open();
929- if (state->xenstore == NULL) {
930- perror("xen: xenstore open");
931- goto err;
932- }
933-
934- xen_create_ioreq_server(xen_domid, &state->ioservid);
935-
936- state->exit.notify = xen_exit_notifier;
937- qemu_add_exit_notifier(&state->exit);
938-
939- /*
940- * Register wake-up support in QMP query-current-machine API
941- */
942- qemu_register_wakeup_support();
943-
944- rc = xen_map_ioreq_server(state);
945- if (rc < 0) {
946- goto err;
947- }
948-
949- /* Note: cpus is empty at this point in init */
950- state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
951-
952- rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
953- if (rc < 0) {
954- error_report("failed to enable ioreq server info: error %d handle=%p",
955- errno, xen_xc);
956- goto err;
957- }
958-
959- state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);
960-
961- /* FIXME: how about if we overflow the page here? */
962- for (i = 0; i < max_cpus; i++) {
963- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
964- xen_vcpu_eport(state->shared_page, i));
965- if (rc == -1) {
966- error_report("shared evtchn %d bind error %d", i, errno);
967- goto err;
968- }
969- state->ioreq_local_port[i] = rc;
970- }
971-
972- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
973- state->bufioreq_remote_port);
974- if (rc == -1) {
975- error_report("buffered evtchn bind error %d", errno);
976- goto err;
977- }
978- state->bufioreq_local_port = rc;
979-
980- /* Init RAM management */
981-#ifdef XEN_COMPAT_PHYSMAP
982- xen_map_cache_init(xen_phys_offset_to_gaddr, state);
983-#else
984- xen_map_cache_init(NULL, state);
985-#endif
986-
987- qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
988-
989- state->memory_listener = xen_memory_listener;
990- memory_listener_register(&state->memory_listener, &address_space_memory);
991-
992- state->io_listener = xen_io_listener;
993- memory_listener_register(&state->io_listener, &address_space_io);
994-
995- state->device_listener = xen_device_listener;
996- QLIST_INIT(&state->dev_list);
997- device_listener_register(&state->device_listener);
998-
999- xen_bus_init();
1000-
1001- /* Initialize backend core & drivers */
1002- if (xen_be_init() != 0) {
1003- error_report("xen backend core setup failed");
1004- goto err;
1005- }
1006- xen_be_register_common();
1007+ xen_register_ioreq(state, max_cpus, xen_memory_listener);
1008
1009 QLIST_INIT(&xen_physmap);
1010 xen_read_physmap(state);
1011@@ -1484,59 +621,11 @@ err:
1012 exit(1);
1013 }
1014
1015-void destroy_hvm_domain(bool reboot)
1016-{
1017- xc_interface *xc_handle;
1018- int sts;
1019- int rc;
1020-
1021- unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;
1022-
1023- if (xen_dmod) {
1024- rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
1025- if (!rc) {
1026- return;
1027- }
1028- if (errno != ENOTTY /* old Xen */) {
1029- perror("xendevicemodel_shutdown failed");
1030- }
1031- /* well, try the old thing then */
1032- }
1033-
1034- xc_handle = xc_interface_open(0, 0, 0);
1035- if (xc_handle == NULL) {
1036- fprintf(stderr, "Cannot acquire xenctrl handle\n");
1037- } else {
1038- sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
1039- if (sts != 0) {
1040- fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
1041- "sts %d, %s\n", reboot ? "reboot" : "poweroff",
1042- sts, strerror(errno));
1043- } else {
1044- fprintf(stderr, "Issued domain %d %s\n", xen_domid,
1045- reboot ? "reboot" : "poweroff");
1046- }
1047- xc_interface_close(xc_handle);
1048- }
1049-}
1050-
1051 void xen_register_framebuffer(MemoryRegion *mr)
1052 {
1053 framebuffer = mr;
1054 }
1055
1056-void xen_shutdown_fatal_error(const char *fmt, ...)
1057-{
1058- va_list ap;
1059-
1060- va_start(ap, fmt);
1061- vfprintf(stderr, fmt, ap);
1062- va_end(ap);
1063- fprintf(stderr, "Will destroy the domain.\n");
1064- /* destroy the domain */
1065- qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
1066-}
1067-
1068 void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
1069 {
1070 if (unlikely(xen_in_migration)) {
1071diff --git a/hw/xen/meson.build b/hw/xen/meson.build
1072index 19d0637c46..008e036d63 100644
1073--- a/hw/xen/meson.build
1074+++ b/hw/xen/meson.build
1075@@ -25,4 +25,7 @@ specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss)
1076
1077 xen_ss = ss.source_set()
1078
1079-xen_ss.add(when: 'CONFIG_XEN', if_true: files('xen-mapcache.c'))
1080+xen_ss.add(when: 'CONFIG_XEN', if_true: files(
1081+ 'xen-mapcache.c',
1082+ 'xen-hvm-common.c',
1083+))
1084diff --git a/hw/xen/trace-events b/hw/xen/trace-events
1085index 2c8f238f42..02ca1183da 100644
1086--- a/hw/xen/trace-events
1087+++ b/hw/xen/trace-events
1088@@ -42,6 +42,20 @@ xs_node_vscanf(char *path, char *value) "%s %s"
1089 xs_node_watch(char *path) "%s"
1090 xs_node_unwatch(char *path) "%s"
1091
1092+# xen-hvm.c
1093+xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx"
1094+xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i"
1095+handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1096+handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1097+handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1098+cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1099+cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
1100+cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
1101+cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1102+xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
1103+cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
1104+cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
1105+
1106 # xen-mapcache.c
1107 xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
1108 xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
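A note on why the trace points move with the code: QEMU generates a
per-directory trace header from each trace-events file, and a source file
includes its own directory's "trace.h", so functions relocated to hw/xen/
need their events declared in hw/xen/trace-events. A simplified sketch of
what tracetool emits for the handle_ioreq event above (illustrative only;
the real helper also dispatches to the configured trace backends):

    /* Generated (simplified): one inline helper per trace-events line. */
    static inline void trace_handle_ioreq(void *req, uint32_t type,
                                          uint32_t dir, uint32_t df,
                                          uint32_t data_is_ptr, uint64_t addr,
                                          uint64_t data, uint32_t count,
                                          uint32_t size)
    {
        /* backend dispatch elided in this sketch */
    }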
1109diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
1110new file mode 100644
1111index 0000000000..67f76f6010
1112--- /dev/null
1113+++ b/hw/xen/xen-hvm-common.c
1114@@ -0,0 +1,861 @@
1115+#include "qemu/osdep.h"
1116+#include "qemu/units.h"
1117+#include "qapi/error.h"
1118+#include "trace.h"
1119+
1120+#include "hw/pci/pci_host.h"
1121+#include "hw/xen/xen-hvm-common.h"
1122+#include "hw/xen/xen-legacy-backend.h"
1123+#include "hw/xen/xen-bus.h"
1124+#include "hw/boards.h"
1125+#include "hw/xen/arch_hvm.h"
1126+
1127+MemoryRegion ram_memory;
1128+
1129+MemoryListener xen_io_listener = {
1130+ .name = "xen-io",
1131+ .region_add = xen_io_add,
1132+ .region_del = xen_io_del,
1133+ .priority = 10,
1134+};
1135+
1136+DeviceListener xen_device_listener = {
1137+ .realize = xen_device_realize,
1138+ .unrealize = xen_device_unrealize,
1139+};
1140+
1141+static void xen_set_memory(struct MemoryListener *listener,
1142+ MemoryRegionSection *section,
1143+ bool add)
1144+{
1145+ XenIOState *state = container_of(listener, XenIOState, memory_listener);
1146+
1147+ if (section->mr == &ram_memory) {
1148+ return;
1149+ } else {
1150+ if (add) {
1151+ xen_map_memory_section(xen_domid, state->ioservid,
1152+ section);
1153+ } else {
1154+ xen_unmap_memory_section(xen_domid, state->ioservid,
1155+ section);
1156+ }
1157+ }
1158+ arch_xen_set_memory(state, section, add);
1159+}
1160+
1161+void xen_region_add(MemoryListener *listener,
1162+ MemoryRegionSection *section)
1163+{
1164+ memory_region_ref(section->mr);
1165+ xen_set_memory(listener, section, true);
1166+}
1167+
1168+void xen_region_del(MemoryListener *listener,
1169+ MemoryRegionSection *section)
1170+{
1171+ xen_set_memory(listener, section, false);
1172+ memory_region_unref(section->mr);
1173+}
1174+
1175+void xen_io_add(MemoryListener *listener,
1176+ MemoryRegionSection *section)
1177+{
1178+ XenIOState *state = container_of(listener, XenIOState, io_listener);
1179+ MemoryRegion *mr = section->mr;
1180+
1181+ if (mr->ops == &unassigned_io_ops) {
1182+ return;
1183+ }
1184+
1185+ memory_region_ref(mr);
1186+
1187+ xen_map_io_section(xen_domid, state->ioservid, section);
1188+}
1189+
1190+void xen_io_del(MemoryListener *listener,
1191+ MemoryRegionSection *section)
1192+{
1193+ XenIOState *state = container_of(listener, XenIOState, io_listener);
1194+ MemoryRegion *mr = section->mr;
1195+
1196+ if (mr->ops == &unassigned_io_ops) {
1197+ return;
1198+ }
1199+
1200+ xen_unmap_io_section(xen_domid, state->ioservid, section);
1201+
1202+ memory_region_unref(mr);
1203+}
1204+
1205+void xen_device_realize(DeviceListener *listener,
1206+ DeviceState *dev)
1207+{
1208+ XenIOState *state = container_of(listener, XenIOState, device_listener);
1209+
1210+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
1211+ PCIDevice *pci_dev = PCI_DEVICE(dev);
1212+ XenPciDevice *xendev = g_new(XenPciDevice, 1);
1213+
1214+ xendev->pci_dev = pci_dev;
1215+ xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
1216+ pci_dev->devfn);
1217+ QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
1218+
1219+ xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
1220+ }
1221+}
1222+
1223+void xen_device_unrealize(DeviceListener *listener,
1224+ DeviceState *dev)
1225+{
1226+ XenIOState *state = container_of(listener, XenIOState, device_listener);
1227+
1228+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
1229+ PCIDevice *pci_dev = PCI_DEVICE(dev);
1230+ XenPciDevice *xendev, *next;
1231+
1232+ xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
1233+
1234+ QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
1235+ if (xendev->pci_dev == pci_dev) {
1236+ QLIST_REMOVE(xendev, entry);
1237+ g_free(xendev);
1238+ break;
1239+ }
1240+ }
1241+ }
1242+}
1243+
1244+/* get the ioreq packets from shared memory */
1245+static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
1246+{
1247+ ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
1248+
1249+ if (req->state != STATE_IOREQ_READY) {
1250+ DPRINTF("I/O request not ready: "
1251+ "%x, ptr: %x, port: %"PRIx64", "
1252+ "data: %"PRIx64", count: %u, size: %u\n",
1253+ req->state, req->data_is_ptr, req->addr,
1254+ req->data, req->count, req->size);
1255+ return NULL;
1256+ }
1257+
1258+ xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
1259+
1260+ req->state = STATE_IOREQ_INPROCESS;
1261+ return req;
1262+}
1263+
1264+/* use poll to get the port notification */
1265+/* returns the pending ioreq packet taken from shared memory, */
1266+/* or NULL when there is nothing to service */
1267+static ioreq_t *cpu_get_ioreq(XenIOState *state)
1268+{
1269+ MachineState *ms = MACHINE(qdev_get_machine());
1270+ unsigned int max_cpus = ms->smp.max_cpus;
1271+ int i;
1272+ evtchn_port_t port;
1273+
1274+ port = xenevtchn_pending(state->xce_handle);
1275+ if (port == state->bufioreq_local_port) {
1276+ timer_mod(state->buffered_io_timer,
1277+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
1278+ return NULL;
1279+ }
1280+
1281+ if (port != -1) {
1282+ for (i = 0; i < max_cpus; i++) {
1283+ if (state->ioreq_local_port[i] == port) {
1284+ break;
1285+ }
1286+ }
1287+
1288+ if (i == max_cpus) {
1289+ hw_error("Fatal error while trying to get io event!\n");
1290+ }
1291+
1292+ /* unmask the wanted port again */
1293+ xenevtchn_unmask(state->xce_handle, port);
1294+
1295+ /* get the io packet from shared memory */
1296+ state->send_vcpu = i;
1297+ return cpu_get_ioreq_from_shared_memory(state, i);
1298+ }
1299+
1300+ /* read error or read nothing */
1301+ return NULL;
1302+}
1303+
1304+static uint32_t do_inp(uint32_t addr, unsigned long size)
1305+{
1306+ switch (size) {
1307+ case 1:
1308+ return cpu_inb(addr);
1309+ case 2:
1310+ return cpu_inw(addr);
1311+ case 4:
1312+ return cpu_inl(addr);
1313+ default:
1314+ hw_error("inp: bad size: %04x %lx", addr, size);
1315+ }
1316+}
1317+
1318+static void do_outp(uint32_t addr,
1319+ unsigned long size, uint32_t val)
1320+{
1321+ switch (size) {
1322+ case 1:
1323+ return cpu_outb(addr, val);
1324+ case 2:
1325+ return cpu_outw(addr, val);
1326+ case 4:
1327+ return cpu_outl(addr, val);
1328+ default:
1329+ hw_error("outp: bad size: %04x %lx", addr, size);
1330+ }
1331+}
1332+
1333+/*
1334+ * Helper functions which read/write an object from/to physical guest
1335+ * memory, as part of the implementation of an ioreq.
1336+ *
1337+ * Equivalent to
1338+ * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
1339+ * val, req->size, 0/1)
1340+ * except without the integer overflow problems.
1341+ */
1342+static void rw_phys_req_item(hwaddr addr,
1343+ ioreq_t *req, uint32_t i, void *val, int rw)
1344+{
1345+ /* Do everything unsigned so overflow just results in a truncated result
1346+ * and accesses to undesired parts of guest memory, which is up
1347+ * to the guest */
1348+ hwaddr offset = (hwaddr)req->size * i;
1349+ if (req->df) {
1350+ addr -= offset;
1351+ } else {
1352+ addr += offset;
1353+ }
1354+ cpu_physical_memory_rw(addr, val, req->size, rw);
1355+}
1356+
1357+static inline void read_phys_req_item(hwaddr addr,
1358+ ioreq_t *req, uint32_t i, void *val)
1359+{
1360+ rw_phys_req_item(addr, req, i, val, 0);
1361+}
1362+static inline void write_phys_req_item(hwaddr addr,
1363+ ioreq_t *req, uint32_t i, void *val)
1364+{
1365+ rw_phys_req_item(addr, req, i, val, 1);
1366+}
1367+
1368+
1369+void cpu_ioreq_pio(ioreq_t *req)
1370+{
1371+ uint32_t i;
1372+
1373+ trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
1374+ req->data, req->count, req->size);
1375+
1376+ if (req->size > sizeof(uint32_t)) {
1377+ hw_error("PIO: bad size (%u)", req->size);
1378+ }
1379+
1380+ if (req->dir == IOREQ_READ) {
1381+ if (!req->data_is_ptr) {
1382+ req->data = do_inp(req->addr, req->size);
1383+ trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
1384+ req->size);
1385+ } else {
1386+ uint32_t tmp;
1387+
1388+ for (i = 0; i < req->count; i++) {
1389+ tmp = do_inp(req->addr, req->size);
1390+ write_phys_req_item(req->data, req, i, &tmp);
1391+ }
1392+ }
1393+ } else if (req->dir == IOREQ_WRITE) {
1394+ if (!req->data_is_ptr) {
1395+ trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
1396+ req->size);
1397+ do_outp(req->addr, req->size, req->data);
1398+ } else {
1399+ for (i = 0; i < req->count; i++) {
1400+ uint32_t tmp = 0;
1401+
1402+ read_phys_req_item(req->data, req, i, &tmp);
1403+ do_outp(req->addr, req->size, tmp);
1404+ }
1405+ }
1406+ }
1407+}
1408+
1409+static void cpu_ioreq_move(ioreq_t *req)
1410+{
1411+ uint32_t i;
1412+
1413+ trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
1414+ req->data, req->count, req->size);
1415+
1416+ if (req->size > sizeof(req->data)) {
1417+ hw_error("MMIO: bad size (%u)", req->size);
1418+ }
1419+
1420+ if (!req->data_is_ptr) {
1421+ if (req->dir == IOREQ_READ) {
1422+ for (i = 0; i < req->count; i++) {
1423+ read_phys_req_item(req->addr, req, i, &req->data);
1424+ }
1425+ } else if (req->dir == IOREQ_WRITE) {
1426+ for (i = 0; i < req->count; i++) {
1427+ write_phys_req_item(req->addr, req, i, &req->data);
1428+ }
1429+ }
1430+ } else {
1431+ uint64_t tmp;
1432+
1433+ if (req->dir == IOREQ_READ) {
1434+ for (i = 0; i < req->count; i++) {
1435+ read_phys_req_item(req->addr, req, i, &tmp);
1436+ write_phys_req_item(req->data, req, i, &tmp);
1437+ }
1438+ } else if (req->dir == IOREQ_WRITE) {
1439+ for (i = 0; i < req->count; i++) {
1440+ read_phys_req_item(req->data, req, i, &tmp);
1441+ write_phys_req_item(req->addr, req, i, &tmp);
1442+ }
1443+ }
1444+ }
1445+}
1446+
1447+static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
1448+{
1449+ uint32_t sbdf = req->addr >> 32;
1450+ uint32_t reg = req->addr;
1451+ XenPciDevice *xendev;
1452+
1453+ if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
1454+ req->size != sizeof(uint32_t)) {
1455+ hw_error("PCI config access: bad size (%u)", req->size);
1456+ }
1457+
1458+ if (req->count != 1) {
1459+ hw_error("PCI config access: bad count (%u)", req->count);
1460+ }
1461+
1462+ QLIST_FOREACH(xendev, &state->dev_list, entry) {
1463+ if (xendev->sbdf != sbdf) {
1464+ continue;
1465+ }
1466+
1467+ if (!req->data_is_ptr) {
1468+ if (req->dir == IOREQ_READ) {
1469+ req->data = pci_host_config_read_common(
1470+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1471+ req->size);
1472+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
1473+ req->size, req->data);
1474+ } else if (req->dir == IOREQ_WRITE) {
1475+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
1476+ req->size, req->data);
1477+ pci_host_config_write_common(
1478+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1479+ req->data, req->size);
1480+ }
1481+ } else {
1482+ uint32_t tmp;
1483+
1484+ if (req->dir == IOREQ_READ) {
1485+ tmp = pci_host_config_read_common(
1486+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1487+ req->size);
1488+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
1489+ req->size, tmp);
1490+ write_phys_req_item(req->data, req, 0, &tmp);
1491+ } else if (req->dir == IOREQ_WRITE) {
1492+ read_phys_req_item(req->data, req, 0, &tmp);
1493+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
1494+ req->size, tmp);
1495+ pci_host_config_write_common(
1496+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1497+ tmp, req->size);
1498+ }
1499+ }
1500+ }
1501+}
1502+
1503+static void handle_ioreq(XenIOState *state, ioreq_t *req)
1504+{
1505+ trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
1506+ req->addr, req->data, req->count, req->size);
1507+
1508+ if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
1509+ (req->size < sizeof (target_ulong))) {
1510+ req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
1511+ }
1512+
1513+ if (req->dir == IOREQ_WRITE)
1514+ trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
1515+ req->addr, req->data, req->count, req->size);
1516+
1517+ switch (req->type) {
1518+ case IOREQ_TYPE_PIO:
1519+ cpu_ioreq_pio(req);
1520+ break;
1521+ case IOREQ_TYPE_COPY:
1522+ cpu_ioreq_move(req);
1523+ break;
1524+ case IOREQ_TYPE_TIMEOFFSET:
1525+ break;
1526+ case IOREQ_TYPE_INVALIDATE:
1527+ xen_invalidate_map_cache();
1528+ break;
1529+ case IOREQ_TYPE_PCI_CONFIG:
1530+ cpu_ioreq_config(state, req);
1531+ break;
1532+ default:
1533+ arch_handle_ioreq(state, req);
1534+ }
1535+ if (req->dir == IOREQ_READ) {
1536+ trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
1537+ req->addr, req->data, req->count, req->size);
1538+ }
1539+}
1540+
1541+static bool handle_buffered_iopage(XenIOState *state)
1542+{
1543+ buffered_iopage_t *buf_page = state->buffered_io_page;
1544+ buf_ioreq_t *buf_req = NULL;
1545+ bool handled_ioreq = false;
1546+ ioreq_t req;
1547+ int qw;
1548+
1549+ if (!buf_page) {
1550+ return 0;
1551+ }
1552+
1553+ memset(&req, 0x00, sizeof(req));
1554+ req.state = STATE_IOREQ_READY;
1555+ req.count = 1;
1556+ req.dir = IOREQ_WRITE;
1557+
1558+ for (;;) {
1559+ uint32_t rdptr = buf_page->read_pointer, wrptr;
1560+
1561+ xen_rmb();
1562+ wrptr = buf_page->write_pointer;
1563+ xen_rmb();
1564+ if (rdptr != buf_page->read_pointer) {
1565+ continue;
1566+ }
1567+ if (rdptr == wrptr) {
1568+ break;
1569+ }
1570+ buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
1571+ req.size = 1U << buf_req->size;
1572+ req.addr = buf_req->addr;
1573+ req.data = buf_req->data;
1574+ req.type = buf_req->type;
1575+ xen_rmb();
1576+ qw = (req.size == 8);
1577+ if (qw) {
1578+ if (rdptr + 1 == wrptr) {
1579+ hw_error("Incomplete quad word buffered ioreq");
1580+ }
1581+ buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
1582+ IOREQ_BUFFER_SLOT_NUM];
1583+ req.data |= ((uint64_t)buf_req->data) << 32;
1584+ xen_rmb();
1585+ }
1586+
1587+ handle_ioreq(state, &req);
1588+
1589+ /* Only req.data may get updated by handle_ioreq(), albeit even that
1590+ * should not happen as such data would never make it to the guest (we
1591+ * can only usefully see writes here after all).
1592+ */
1593+ assert(req.state == STATE_IOREQ_READY);
1594+ assert(req.count == 1);
1595+ assert(req.dir == IOREQ_WRITE);
1596+ assert(!req.data_is_ptr);
1597+
1598+ qatomic_add(&buf_page->read_pointer, qw + 1);
1599+ handled_ioreq = true;
1600+ }
1600+
1601+ return handled_ioreq;
1602+}
1603+
1604+static void handle_buffered_io(void *opaque)
1605+{
1606+ XenIOState *state = opaque;
1607+
1608+ if (handle_buffered_iopage(state)) {
1609+ timer_mod(state->buffered_io_timer,
1610+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
1611+ } else {
1612+ timer_del(state->buffered_io_timer);
1613+ xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
1614+ }
1615+}
1616+
1617+static void cpu_handle_ioreq(void *opaque)
1618+{
1619+ XenIOState *state = opaque;
1620+ ioreq_t *req = cpu_get_ioreq(state);
1621+
1622+ handle_buffered_iopage(state);
1623+ if (req) {
1624+ ioreq_t copy = *req;
1625+
1626+ xen_rmb();
1627+ handle_ioreq(state, &copy);
1628+ req->data = copy.data;
1629+
1630+ if (req->state != STATE_IOREQ_INPROCESS) {
1631+ fprintf(stderr, "Badness in I/O request ... not in service?!: "
1632+ "%x, ptr: %x, port: %"PRIx64", "
1633+ "data: %"PRIx64", count: %u, size: %u, type: %u\n",
1634+ req->state, req->data_is_ptr, req->addr,
1635+ req->data, req->count, req->size, req->type);
1636+ destroy_hvm_domain(false);
1637+ return;
1638+ }
1639+
1640+ xen_wmb(); /* Update ioreq contents /then/ update state. */
1641+
1642+ /*
1643+ * We do this before we send the response so that the tools
1644+ * have the opportunity to pick up on the reset before the
1645+ * guest resumes and does a hlt with interrupts disabled which
1646+ * causes Xen to powerdown the domain.
1647+ */
1648+ if (runstate_is_running()) {
1649+ ShutdownCause request;
1650+
1651+ if (qemu_shutdown_requested_get()) {
1652+ destroy_hvm_domain(false);
1653+ }
1654+ request = qemu_reset_requested_get();
1655+ if (request) {
1656+ qemu_system_reset(request);
1657+ destroy_hvm_domain(true);
1658+ }
1659+ }
1660+
1661+ req->state = STATE_IORESP_READY;
1662+ xenevtchn_notify(state->xce_handle,
1663+ state->ioreq_local_port[state->send_vcpu]);
1664+ }
1665+}
1666+
1667+static void xen_main_loop_prepare(XenIOState *state)
1668+{
1669+ int evtchn_fd = -1;
1670+
1671+ if (state->xce_handle != NULL) {
1672+ evtchn_fd = xenevtchn_fd(state->xce_handle);
1673+ }
1674+
1675+ state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
1676+ state);
1677+
1678+ if (evtchn_fd != -1) {
1679+ CPUState *cpu_state;
1680+
1681+ DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
1682+ CPU_FOREACH(cpu_state) {
1683+ DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
1684+ __func__, cpu_state->cpu_index, cpu_state);
1685+ state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
1686+ }
1687+ qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
1688+ }
1689+}
1690+
1691+
1692+void xen_hvm_change_state_handler(void *opaque, bool running,
1693+ RunState rstate)
1694+{
1695+ XenIOState *state = opaque;
1696+
1697+ if (running) {
1698+ xen_main_loop_prepare(state);
1699+ }
1700+
1701+ xen_set_ioreq_server_state(xen_domid,
1702+ state->ioservid,
1703+ (rstate == RUN_STATE_RUNNING));
1704+}
1705+
1706+void xen_exit_notifier(Notifier *n, void *data)
1707+{
1708+ XenIOState *state = container_of(n, XenIOState, exit);
1709+
1710+ xen_destroy_ioreq_server(xen_domid, state->ioservid);
1711+ if (state->fres != NULL) {
1712+ xenforeignmemory_unmap_resource(xen_fmem, state->fres);
1713+ }
1714+
1715+ xenevtchn_close(state->xce_handle);
1716+ xs_daemon_close(state->xenstore);
1717+}
1718+
1719+static int xen_map_ioreq_server(XenIOState *state)
1720+{
1721+ void *addr = NULL;
1722+ xen_pfn_t ioreq_pfn;
1723+ xen_pfn_t bufioreq_pfn;
1724+ evtchn_port_t bufioreq_evtchn;
1725+ int rc;
1726+
1727+ /*
1728+ * Attempt to map using the resource API and fall back to normal
1729+ * foreign mapping if this is not supported.
1730+ */
1731+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
1732+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
1733+ state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
1734+ XENMEM_resource_ioreq_server,
1735+ state->ioservid, 0, 2,
1736+ &addr,
1737+ PROT_READ | PROT_WRITE, 0);
1738+ if (state->fres != NULL) {
1739+ trace_xen_map_resource_ioreq(state->ioservid, addr);
1740+ state->buffered_io_page = addr;
1741+ state->shared_page = addr + TARGET_PAGE_SIZE;
1742+ } else if (errno != EOPNOTSUPP) {
1743+ error_report("failed to map ioreq server resources: error %d handle=%p",
1744+ errno, xen_xc);
1745+ return -1;
1746+ }
1747+
1748+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
1749+ (state->shared_page == NULL) ?
1750+ &ioreq_pfn : NULL,
1751+ (state->buffered_io_page == NULL) ?
1752+ &bufioreq_pfn : NULL,
1753+ &bufioreq_evtchn);
1754+ if (rc < 0) {
1755+ error_report("failed to get ioreq server info: error %d handle=%p",
1756+ errno, xen_xc);
1757+ return rc;
1758+ }
1759+
1760+ if (state->shared_page == NULL) {
1761+ DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
1762+
1763+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
1764+ PROT_READ | PROT_WRITE,
1765+ 1, &ioreq_pfn, NULL);
1766+ if (state->shared_page == NULL) {
1767+ error_report("map shared IO page returned error %d handle=%p",
1768+ errno, xen_xc);
1769+ }
1770+ }
1771+
1772+ if (state->buffered_io_page == NULL) {
1773+ DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
1774+
1775+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
1776+ PROT_READ | PROT_WRITE,
1777+ 1, &bufioreq_pfn,
1778+ NULL);
1779+ if (state->buffered_io_page == NULL) {
1780+ error_report("map buffered IO page returned error %d", errno);
1781+ return -1;
1782+ }
1783+ }
1784+
1785+ if (state->shared_page == NULL || state->buffered_io_page == NULL) {
1786+ return -1;
1787+ }
1788+
1789+ DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
1790+
1791+ state->bufioreq_remote_port = bufioreq_evtchn;
1792+
1793+ return 0;
1794+}
1795+
1796+void xen_shutdown_fatal_error(const char *fmt, ...)
1797+{
1798+ va_list ap;
1799+
1800+ va_start(ap, fmt);
1801+ vfprintf(stderr, fmt, ap);
1802+ va_end(ap);
1803+ fprintf(stderr, "Will destroy the domain.\n");
1804+ /* destroy the domain */
1805+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
1806+}
1807+
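A typical call site, for flavour (the message here is illustrative; the real callers live in the ioreq handling code moved earlier in this patch):

```c
/* Illustrative: an unrecoverable inconsistency in the request state. */
if (req->state != STATE_IOREQ_INPROCESS) {
    xen_shutdown_fatal_error("Bad ioreq state %d, type %d\n",
                             req->state, req->type);
}
```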
1808+void destroy_hvm_domain(bool reboot)
1809+{
1810+ xc_interface *xc_handle;
1811+ int sts;
1812+ int rc;
1813+
1814+ unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;
1815+
1816+ if (xen_dmod) {
1817+ rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
1818+ if (!rc) {
1819+ return;
1820+ }
1821+ if (errno != ENOTTY /* old Xen */) {
1822+ perror("xendevicemodel_shutdown failed");
1823+ }
1824+ /* well, try the old thing then */
1825+ }
1826+
1827+ xc_handle = xc_interface_open(0, 0, 0);
1828+ if (xc_handle == NULL) {
1829+ fprintf(stderr, "Cannot acquire xenctrl handle\n");
1830+ } else {
1831+ sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
1832+ if (sts != 0) {
1833+ fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
1834+ "sts %d, %s\n", reboot ? "reboot" : "poweroff",
1835+ sts, strerror(errno));
1836+ } else {
1837+ fprintf(stderr, "Issued domain %d %s\n", xen_domid,
1838+ reboot ? "reboot" : "poweroff");
1839+ }
1840+ xc_interface_close(xc_handle);
1841+ }
1842+}
1843+
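Both shutdown paths take the same reason code, so a caller only chooses between reboot and poweroff via the flag:

```c
destroy_hvm_domain(true);    /* reason = SHUTDOWN_reboot   */
destroy_hvm_domain(false);   /* reason = SHUTDOWN_poweroff */
```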
1844+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
1845+ Error **errp)
1846+{
1847+ unsigned long nr_pfn;
1848+ xen_pfn_t *pfn_list;
1849+ int i;
1850+
1851+ if (runstate_check(RUN_STATE_INMIGRATE)) {
1852+ /* RAM already populated in Xen */
1853+ fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
1854+ " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
1855+ __func__, size, ram_addr);
1856+ return;
1857+ }
1858+
1859+ if (mr == &ram_memory) {
1860+ return;
1861+ }
1862+
1863+ trace_xen_ram_alloc(ram_addr, size);
1864+
1865+ nr_pfn = size >> TARGET_PAGE_BITS;
1866+ pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);
1867+
1868+ for (i = 0; i < nr_pfn; i++) {
1869+ pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
1870+ }
1871+
1872+ if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
1873+ error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
1874+ ram_addr);
1875+ }
1876+
1877+ g_free(pfn_list);
1878+}
1879+
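The PFN arithmetic is a straight address-to-frame conversion. A worked example, assuming 4 KiB pages (TARGET_PAGE_BITS == 12):

```c
/* xen_ram_alloc(0x40000000, 2 * MiB, mr, errp) computes:
 *   nr_pfn        = 0x200000   >> 12 = 512
 *   pfn_list[0]   = 0x40000000 >> 12 = 0x40000
 *   pfn_list[511] = 0x40000 + 511    = 0x401ff
 * and xc_domain_populate_physmap_exact() then asks Xen to back
 * exactly those 512 guest frames.
 */
```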
1880+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
1881+ MemoryListener xen_memory_listener)
1882+{
1883+ int i, rc;
1884+
1885+ state->xce_handle = xenevtchn_open(NULL, 0);
1886+ if (state->xce_handle == NULL) {
1887+ perror("xen: event channel open");
1888+ goto err;
1889+ }
1890+
1891+ state->xenstore = xs_daemon_open();
1892+ if (state->xenstore == NULL) {
1893+ perror("xen: xenstore open");
1894+ goto err;
1895+ }
1896+
1897+ xen_create_ioreq_server(xen_domid, &state->ioservid);
1898+
1899+ state->exit.notify = xen_exit_notifier;
1900+ qemu_add_exit_notifier(&state->exit);
1901+
1902+ /*
1903+ * Register wake-up support in QMP query-current-machine API
1904+ */
1905+ qemu_register_wakeup_support();
1906+
1907+ rc = xen_map_ioreq_server(state);
1908+ if (rc < 0) {
1909+ goto err;
1910+ }
1911+
1912+ /* Note: cpus is empty at this point in init */
1913+ state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
1914+
1915+ rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
1916+ if (rc < 0) {
1917+ error_report("failed to enable ioreq server info: error %d handle=%p",
1918+ errno, xen_xc);
1919+ goto err;
1920+ }
1921+
1922+ state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);
1923+
1924+ /* FIXME: how about if we overflow the page here? */
1925+ for (i = 0; i < max_cpus; i++) {
1926+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1927+ xen_vcpu_eport(state->shared_page, i));
1928+ if (rc == -1) {
1929+ error_report("shared evtchn %d bind error %d", i, errno);
1930+ goto err;
1931+ }
1932+ state->ioreq_local_port[i] = rc;
1933+ }
1934+
1935+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1936+ state->bufioreq_remote_port);
1937+ if (rc == -1) {
1938+ error_report("buffered evtchn bind error %d", errno);
1939+ goto err;
1940+ }
1941+ state->bufioreq_local_port = rc;
1942+
1943+ /* Init RAM management */
1944+#ifdef XEN_COMPAT_PHYSMAP
1945+ xen_map_cache_init(xen_phys_offset_to_gaddr, state);
1946+#else
1947+ xen_map_cache_init(NULL, state);
1948+#endif
1949+
1950+ qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
1951+
1952+ state->memory_listener = xen_memory_listener;
1953+ memory_listener_register(&state->memory_listener, &address_space_memory);
1954+
1955+ state->io_listener = xen_io_listener;
1956+ memory_listener_register(&state->io_listener, &address_space_io);
1957+
1958+ state->device_listener = xen_device_listener;
1959+ QLIST_INIT(&state->dev_list);
1960+ device_listener_register(&state->device_listener);
1961+
1962+ xen_bus_init();
1963+
1964+ /* Initialize backend core & drivers */
1965+ if (xen_be_init() != 0) {
1966+ error_report("xen backend core setup failed");
1967+ goto err;
1968+ }
1969+ xen_be_register_common();
1970+
1971+ return;
1972+err:
1973+ error_report("xen hardware virtual machine initialisation failed");
1974+ exit(1);
1975+}
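Per the commit message, xen_register_ioreq() is the piece both x86 and Arm machine code can now share; each architecture keeps only its own MemoryListener (note it is passed by value here). A hypothetical caller, with names invented for illustration:

```c
/* Hypothetical arch-side wiring; only the listener is arch-specific. */
static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    /* ... remaining callbacks elided ... */
};

static void my_xen_hvm_init(MachineState *ms)
{
    XenIOState *state = g_new0(XenIOState, 1);

    xen_register_ioreq(state, ms->smp.max_cpus, xen_memory_listener);
}
```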
1976diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h
1977index 1b2c71ba4f..1000f8f543 100644
1978--- a/include/hw/i386/xen_arch_hvm.h
1979+++ b/include/hw/i386/xen_arch_hvm.h
1980@@ -2,6 +2,7 @@
1981 #define HW_XEN_ARCH_I386_HVM_H
1982
1983 #include <xen/hvm/ioreq.h>
1984+#include "hw/xen/xen-hvm-common.h"
1985
1986 void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
1987 void arch_xen_set_memory(XenIOState *state,
1988diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
1989new file mode 100644
1990index 0000000000..2979f84ee2
1991--- /dev/null
1992+++ b/include/hw/xen/xen-hvm-common.h
1993@@ -0,0 +1,98 @@
1994+#ifndef HW_XEN_HVM_COMMON_H
1995+#define HW_XEN_HVM_COMMON_H
1996+
1997+#include "qemu/osdep.h"
1998+#include "qemu/units.h"
1999+
2000+#include "cpu.h"
2001+#include "hw/pci/pci.h"
2002+#include "hw/hw.h"
2003+#include "hw/xen/xen_common.h"
2004+#include "sysemu/runstate.h"
2005+#include "sysemu/sysemu.h"
2006+#include "sysemu/xen.h"
2007+#include "sysemu/xen-mapcache.h"
2008+
2009+#include <xen/hvm/ioreq.h>
2010+
2011+extern MemoryRegion ram_memory;
2012+extern MemoryListener xen_io_listener;
2013+extern DeviceListener xen_device_listener;
2014+
2015+//#define DEBUG_XEN_HVM
2016+
2017+#ifdef DEBUG_XEN_HVM
2018+#define DPRINTF(fmt, ...) \
2019+ do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
2020+#else
2021+#define DPRINTF(fmt, ...) \
2022+ do { } while (0)
2023+#endif
2024+
2025+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
2026+{
2027+ return shared_page->vcpu_ioreq[i].vp_eport;
2028+}
2029+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
2030+{
2031+ return &shared_page->vcpu_ioreq[vcpu];
2032+}
2033+
2034+#define BUFFER_IO_MAX_DELAY 100
2035+
2036+typedef struct XenPhysmap {
2037+ hwaddr start_addr;
2038+ ram_addr_t size;
2039+ const char *name;
2040+ hwaddr phys_offset;
2041+
2042+ QLIST_ENTRY(XenPhysmap) list;
2043+} XenPhysmap;
2044+
2045+typedef struct XenPciDevice {
2046+ PCIDevice *pci_dev;
2047+ uint32_t sbdf;
2048+ QLIST_ENTRY(XenPciDevice) entry;
2049+} XenPciDevice;
2050+
2051+typedef struct XenIOState {
2052+ ioservid_t ioservid;
2053+ shared_iopage_t *shared_page;
2054+ buffered_iopage_t *buffered_io_page;
2055+ xenforeignmemory_resource_handle *fres;
2056+ QEMUTimer *buffered_io_timer;
2057+ CPUState **cpu_by_vcpu_id;
2058+ /* the evtchn ports for polling notifications, one per vcpu */
2059+ evtchn_port_t *ioreq_local_port;
2060+ /* evtchn remote and local ports for buffered io */
2061+ evtchn_port_t bufioreq_remote_port;
2062+ evtchn_port_t bufioreq_local_port;
2063+ /* the evtchn fd for polling */
2064+ xenevtchn_handle *xce_handle;
2065+ /* which vcpu we are serving */
2066+ int send_vcpu;
2067+
2068+ struct xs_handle *xenstore;
2069+ MemoryListener memory_listener;
2070+ MemoryListener io_listener;
2071+ QLIST_HEAD(, XenPciDevice) dev_list;
2072+ DeviceListener device_listener;
2073+
2074+ Notifier exit;
2075+} XenIOState;
2076+
2077+void xen_exit_notifier(Notifier *n, void *data);
2078+
2079+void xen_region_add(MemoryListener *listener, MemoryRegionSection *section);
2080+void xen_region_del(MemoryListener *listener, MemoryRegionSection *section);
2081+void xen_io_add(MemoryListener *listener, MemoryRegionSection *section);
2082+void xen_io_del(MemoryListener *listener, MemoryRegionSection *section);
2083+void xen_device_realize(DeviceListener *listener, DeviceState *dev);
2084+void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
2085+
2086+void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
2087+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
2088+ MemoryListener xen_memory_listener);
2089+
2090+void cpu_ioreq_pio(ioreq_t *req);
2091+#endif /* HW_XEN_HVM_COMMON_H */
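The shared_iopage_t holds one ioreq slot per vcpu, and the two inline helpers above index into it; this is how the dispatch code pairs an event-channel port with its request slot. Roughly (illustrative, assuming a populated XenIOState *state):

```c
int v = state->send_vcpu;                                   /* vcpu being served */
ioreq_t *req = xen_vcpu_ioreq(state->shared_page, v);       /* its request slot  */
evtchn_port_t port = xen_vcpu_eport(state->shared_page, v); /* its notify port   */
```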
2092--
20932.17.1
2094