From d4774a0e5e1ebed605c5d49e81433fd371d0b680 Mon Sep 17 00:00:00 2001
From: Vikram Garhwal <vikram.garhwal@amd.com>
Date: Tue, 17 Oct 2023 20:22:26 +0000
Subject: [PATCH 05/11] softmmu: physmem: Split ram_block_add()

Extract the ram block list update into a new function, ram_block_add_list().
This is done to support grant mappings, which add a memory region for granted
memory and update the ram_block list.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
 include/exec/ram_addr.h |  1 +
 softmmu/physmem.c       | 62 ++++++++++++++++++++++++++---------------
 2 files changed, 40 insertions(+), 23 deletions(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9f2e3893f5..76fa360463 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -139,6 +139,7 @@ void qemu_ram_free(RAMBlock *block);
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
+void ram_block_add_list(RAMBlock *new_block);
/* Clear whole block of mem */
static inline void qemu_ram_block_writeback(RAMBlock *block)
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 3df73542e1..f73629733e 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -1786,12 +1786,47 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
}
}
+static void ram_block_add_list_locked(RAMBlock *new_block)
+{
+    RAMBlock *block;
+    RAMBlock *last_block = NULL;
+
+    /*
+     * Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
+     * QLIST (which has an RCU-friendly variant) does not have insertion at
+     * tail, so save the last element in last_block.
+     */
+    RAMBLOCK_FOREACH(block) {
+        last_block = block;
+        if (block->max_length < new_block->max_length) {
+            break;
+        }
+    }
+    if (block) {
+        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
+    } else if (last_block) {
+        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
+    } else { /* list is empty */
+        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
+    }
+    ram_list.mru_block = NULL;
+
+    /* Write list before version */
+    smp_wmb();
+    ram_list.version++;
+}
+
+void ram_block_add_list(RAMBlock *new_block)
+{
+    qemu_mutex_lock_ramlist();
+    ram_block_add_list_locked(new_block);
+    qemu_mutex_unlock_ramlist();
+}
+
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
const bool noreserve = qemu_ram_is_noreserve(new_block);
const bool shared = qemu_ram_is_shared(new_block);
-    RAMBlock *block;
-    RAMBlock *last_block = NULL;
ram_addr_t old_ram_size, new_ram_size;
Error *err = NULL;
@@ -1829,28 +1864,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
if (new_ram_size > old_ram_size) {
dirty_memory_extend(old_ram_size, new_ram_size);
}
-    /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
-     * QLIST (which has an RCU-friendly variant) does not have insertion at
-     * tail, so save the last element in last_block.
-     */
-    RAMBLOCK_FOREACH(block) {
-        last_block = block;
-        if (block->max_length < new_block->max_length) {
-            break;
-        }
-    }
-    if (block) {
-        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
-    } else if (last_block) {
-        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
-    } else { /* list is empty */
-        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
-    }
-    ram_list.mru_block = NULL;
+
+    ram_block_add_list_locked(new_block);
-    /* Write list before version */
-    smp_wmb();
-    ram_list.version++;
qemu_mutex_unlock_ramlist();
cpu_physical_memory_set_dirty_range(new_block->offset,
--
2.39.2
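
Editor's note, not part of the patch above: a minimal, hypothetical sketch of how
a caller (for example, a Xen grant-mapping backend) might use the newly exported
ram_block_add_list() helper. The function name grant_region_add_block(), the
field initialisation, and the RAM_PREALLOC flag choice are illustrative
assumptions and are not taken from this series.

#include "qemu/osdep.h"
#include "exec/ram_addr.h"
#include "exec/ramblock.h"

/*
 * Hypothetical illustration only: wrap an externally provided mapping in a
 * RAMBlock and insert it into the global, size-sorted RAM block list.
 */
static RAMBlock *grant_region_add_block(MemoryRegion *mr, void *host,
                                        ram_addr_t size)
{
    RAMBlock *new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;                 /* owning memory region */
    new_block->host = host;             /* mapping set up by the caller */
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->fd = -1;                 /* not file backed */
    new_block->page_size = qemu_real_host_page_size();
    new_block->flags = RAM_PREALLOC;    /* treated as a fixed mapping here */

    /*
     * new_block->offset is left unset in this sketch; a real caller must
     * reserve a ram_addr_t range for the block before or after insertion.
     */

    /* Takes the ram_list lock and keeps the list sorted by max_length. */
    ram_block_add_list(new_block);

    return new_block;
}

Because ram_block_add_list() takes the ram_list mutex itself, a caller along
these lines only has to fully initialise the RAMBlock before inserting it.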