3 files changed, 5219 insertions, 0 deletions
diff --git a/meta-oe/recipes-devtools/nodejs/nodejs/0001-Revert-io_uring-changes-from-libuv-1.46.0.patch b/meta-oe/recipes-devtools/nodejs/nodejs/0001-Revert-io_uring-changes-from-libuv-1.46.0.patch
new file mode 100644
index 0000000000..d98463d0d8
--- /dev/null
+++ b/meta-oe/recipes-devtools/nodejs/nodejs/0001-Revert-io_uring-changes-from-libuv-1.46.0.patch
@@ -0,0 +1,3414 @@
From 937dfa70104b7401a7570a98cf6e0a534e250557 Mon Sep 17 00:00:00 2001
From: Martin Jansa <martin.jansa@gmail.com>
Date: Wed, 18 Oct 2023 22:43:55 +0200
Subject: [PATCH] Revert io_uring changes from libuv-1.46.0

Included in nodejs-20.4.0 with the libuv upgrade to 1.46.0 in:
https://github.com/nodejs/node/commit/6199e1946c9abd59bd71a61870a4f6c85e100b18

Revert "linux: work around io_uring IORING_OP_CLOSE bug (#4059)"
This reverts commit 1752791c9ea89dbf54e2a20a9d9f899119a2d179.
Revert "linux: fs_read to use io_uring if iovcnt > IOV_MAX (#4023)"
This reverts commit e7b96331703e929e75d93c574573c9736e34b0c0.
Revert "linux: add some more iouring backed fs ops (#4012)"
This reverts commit 962b8e626ceaaf296eecab1d008e8b70dc6da5e7.
Revert "android: disable io_uring support (#4016)"
This reverts commit 281e6185cc9e77c582ff6ca9e4c00d57e6b90d95.
Revert "unix,win: replace QUEUE with struct uv__queue (#4022)"
This reverts commit 1b01b786c0180d29f07dccbb98001a2b3148828a.
Revert "fs: use WTF-8 on Windows (#2970)"
This reverts commit 8f32a14afaaa47514a7d28e1e069a8329e2dd939.

Dropped the deps/uv/docs and deps/uv/test changes, as these directories
are not included in nodejs tarballs.

Signed-off-by: Martin Jansa <martin.jansa@gmail.com>
---
Upstream-Status: Inappropriate [OE specific]
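
For reviewers who do not have the pre-1.46.0 sources at hand: the bulk of
this revert swaps libuv back from struct uv__queue to the older QUEUE type,
an intrusive circular doubly linked list stored as a two-pointer array
(typedef void *QUEUE[2]) embedded in each handle and manipulated through
the macros restored in deps/uv/src/queue.h below. A minimal sketch of the
idiom, assuming the restored queue.h is on the include path (the
my_work_item struct and its field names are made up for illustration):

  #include <stdio.h>
  #include "queue.h"

  struct my_work_item {
    int id;
    QUEUE member;             /* links the item into a list; no allocation */
  };

  int main(void) {
    QUEUE head;               /* a list head is itself just a QUEUE */
    struct my_work_item a = { 1, { NULL, NULL } };
    struct my_work_item b = { 2, { NULL, NULL } };
    QUEUE* q;

    QUEUE_INIT(&head);
    QUEUE_INSERT_TAIL(&head, &a.member);
    QUEUE_INSERT_TAIL(&head, &b.member);

    /* QUEUE_DATA recovers the enclosing struct from the embedded member,
     * container_of-style, via offsetof. */
    QUEUE_FOREACH(q, &head) {
      struct my_work_item* item = QUEUE_DATA(q, struct my_work_item, member);
      printf("item %d\n", item->id);
    }

    QUEUE_REMOVE(&a.member);  /* O(1) unlink, no head pointer needed */
    return 0;
  }

This is the pattern the threadpool and loop code below goes back to
(QUEUE_MOVE to steal a whole list under a lock, QUEUE_DATA to find the
owning request), so the mechanical uv__queue_* to QUEUE_* renames are
easier to review with it in mind.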

 deps/uv/include/uv.h | 10 +-
 deps/uv/include/uv/darwin.h | 6 +-
 deps/uv/include/uv/linux.h | 2 +-
 deps/uv/include/uv/threadpool.h | 2 +-
 deps/uv/include/uv/unix.h | 44 ++--
 deps/uv/include/uv/win.h | 4 +-
 deps/uv/src/queue.h | 154 ++++++-----
 deps/uv/src/threadpool.c | 72 +++---
 deps/uv/src/unix/aix.c | 14 +-
 deps/uv/src/unix/async.c | 52 ++--
 deps/uv/src/unix/core.c | 53 ++--
 deps/uv/src/unix/fs.c | 41 +--
 deps/uv/src/unix/fsevents.c | 84 +++---
 deps/uv/src/unix/internal.h | 11 -
 deps/uv/src/unix/kqueue.c | 18 +-
 deps/uv/src/unix/linux.c | 257 +++----------------
 deps/uv/src/unix/loop-watcher.c | 20 +-
 deps/uv/src/unix/loop.c | 28 +-
 deps/uv/src/unix/os390-syscalls.c | 24 +-
 deps/uv/src/unix/os390-syscalls.h | 2 +-
 deps/uv/src/unix/os390.c | 14 +-
 deps/uv/src/unix/pipe.c | 2 +-
 deps/uv/src/unix/posix-poll.c | 14 +-
 deps/uv/src/unix/process.c | 38 +--
 deps/uv/src/unix/signal.c | 8 +-
 deps/uv/src/unix/stream.c | 56 ++--
 deps/uv/src/unix/sunos.c | 18 +-
 deps/uv/src/unix/tcp.c | 4 +-
 deps/uv/src/unix/tty.c | 2 +-
 deps/uv/src/unix/udp.c | 76 +++---
 deps/uv/src/uv-common.c | 28 +-
 deps/uv/src/uv-common.h | 3 +-
 deps/uv/src/win/core.c | 6 +-
 deps/uv/src/win/fs.c | 311 ++++++++---------------
 deps/uv/src/win/handle-inl.h | 2 +-
 deps/uv/src/win/pipe.c | 26 +-
 deps/uv/src/win/tcp.c | 4 +-
 deps/uv/src/win/udp.c | 4 +-
 deps/uv/test/test-fs.c | 53 ----
 deps/uv/test/test-list.h | 2 -
 deps/uv/test/test-queue-foreach-delete.c | 5 +-
 deps/uv/test/test-threadpool-cancel.c | 5 -
 42 files changed, 625 insertions(+), 954 deletions(-)

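One non-mechanical hunk worth calling out: the revert moves the
uv__kernel_version() helper back into deps/uv/src/unix/fs.c (and drops its
declaration from internal.h). It packs the running kernel's
major.minor.patch into one integer as major * 65536 + minor * 256 + patch,
so 4.20.0 encodes as 267264 and version checks such as the pre-4.20 CephFS
copy_file_range() workaround become plain integer comparisons. A standalone
sketch of the same encoding (an illustrative rewrite, without the atomic
caching the real helper uses):

  #include <stdio.h>
  #include <sys/utsname.h>

  static unsigned kernel_version(void) {
    struct utsname u;
    unsigned major, minor, patch;

    if (uname(&u) == -1)
      return 0;
    if (sscanf(u.release, "%u.%u.%u", &major, &minor, &patch) != 3)
      return 0;
    return major * 65536 + minor * 256 + patch;  /* e.g. 5.10.3 -> 330243 */
  }

  int main(void) {
    unsigned v = kernel_version();
    printf("kernel %u.%u.%u, >= 4.20: %s\n",
           v >> 16, (v >> 8) & 255, v & 255,
           v >= 4 * 65536 + 20 * 256 ? "yes" : "no");
    return 0;
  }
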
diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h
index 02397dd0fdd..1af95289776 100644
--- a/deps/uv/include/uv.h
+++ b/deps/uv/include/uv.h
@@ -59,12 +59,6 @@ extern "C" {
 #include <stdio.h>
 #include <stdint.h>

-/* Internal type, do not use. */
-struct uv__queue {
-  struct uv__queue* next;
-  struct uv__queue* prev;
-};
-
 #if defined(_WIN32)
 # include "uv/win.h"
 #else
@@ -466,7 +460,7 @@ struct uv_shutdown_s {
   uv_handle_type type; \
   /* private */ \
   uv_close_cb close_cb; \
-  struct uv__queue handle_queue; \
+  void* handle_queue[2]; \
   union { \
     int fd; \
     void* reserved[4]; \
@@ -1870,7 +1864,7 @@ struct uv_loop_s {
   void* data;
   /* Loop reference counting. */
   unsigned int active_handles;
-  struct uv__queue handle_queue;
+  void* handle_queue[2];
   union {
     void* unused;
     unsigned int count;
diff --git a/deps/uv/include/uv/darwin.h b/deps/uv/include/uv/darwin.h
index 06962bfda80..d226415820b 100644
--- a/deps/uv/include/uv/darwin.h
+++ b/deps/uv/include/uv/darwin.h
@@ -40,7 +40,7 @@
   void* cf_state; \
   uv_mutex_t cf_mutex; \
   uv_sem_t cf_sem; \
-  struct uv__queue cf_signals; \
+  void* cf_signals[2]; \

 #define UV_PLATFORM_FS_EVENT_FIELDS \
   uv__io_t event_watcher; \
@@ -48,8 +48,8 @@
   int realpath_len; \
   int cf_flags; \
   uv_async_t* cf_cb; \
-  struct uv__queue cf_events; \
-  struct uv__queue cf_member; \
+  void* cf_events[2]; \
+  void* cf_member[2]; \
   int cf_error; \
   uv_mutex_t cf_mutex; \

diff --git a/deps/uv/include/uv/linux.h b/deps/uv/include/uv/linux.h
index 9f22f8cf726..9b38405a190 100644
--- a/deps/uv/include/uv/linux.h
+++ b/deps/uv/include/uv/linux.h
@@ -28,7 +28,7 @@
   int inotify_fd; \

 #define UV_PLATFORM_FS_EVENT_FIELDS \
-  struct uv__queue watchers; \
+  void* watchers[2]; \
   int wd; \

 #endif /* UV_LINUX_H */
diff --git a/deps/uv/include/uv/threadpool.h b/deps/uv/include/uv/threadpool.h
index 24ce916fda4..9708ebdd530 100644
--- a/deps/uv/include/uv/threadpool.h
+++ b/deps/uv/include/uv/threadpool.h
@@ -31,7 +31,7 @@ struct uv__work {
   void (*work)(struct uv__work *w);
   void (*done)(struct uv__work *w, int status);
   struct uv_loop_s* loop;
-  struct uv__queue wq;
+  void* wq[2];
 };

 #endif /* UV_THREADPOOL_H_ */
diff --git a/deps/uv/include/uv/unix.h b/deps/uv/include/uv/unix.h
index 09f88a56742..95447b8dd67 100644
--- a/deps/uv/include/uv/unix.h
+++ b/deps/uv/include/uv/unix.h
@@ -92,8 +92,8 @@ typedef struct uv__io_s uv__io_t;

 struct uv__io_s {
   uv__io_cb cb;
-  struct uv__queue pending_queue;
-  struct uv__queue watcher_queue;
+  void* pending_queue[2];
+  void* watcher_queue[2];
   unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
   unsigned int events;  /* Current event mask. */
   int fd;
@@ -220,21 +220,21 @@ typedef struct {
 #define UV_LOOP_PRIVATE_FIELDS \
   unsigned long flags; \
   int backend_fd; \
-  struct uv__queue pending_queue; \
-  struct uv__queue watcher_queue; \
+  void* pending_queue[2]; \
+  void* watcher_queue[2]; \
   uv__io_t** watchers; \
   unsigned int nwatchers; \
   unsigned int nfds; \
-  struct uv__queue wq; \
+  void* wq[2]; \
   uv_mutex_t wq_mutex; \
   uv_async_t wq_async; \
   uv_rwlock_t cloexec_lock; \
   uv_handle_t* closing_handles; \
-  struct uv__queue process_handles; \
-  struct uv__queue prepare_handles; \
-  struct uv__queue check_handles; \
-  struct uv__queue idle_handles; \
-  struct uv__queue async_handles; \
+  void* process_handles[2]; \
+  void* prepare_handles[2]; \
+  void* check_handles[2]; \
+  void* idle_handles[2]; \
+  void* async_handles[2]; \
   void (*async_unused)(void);  /* TODO(bnoordhuis) Remove in libuv v2. */ \
   uv__io_t async_io_watcher; \
   int async_wfd; \
@@ -257,7 +257,7 @@ typedef struct {
 #define UV_PRIVATE_REQ_TYPES /* empty */

 #define UV_WRITE_PRIVATE_FIELDS \
-  struct uv__queue queue; \
+  void* queue[2]; \
   unsigned int write_index; \
   uv_buf_t* bufs; \
   unsigned int nbufs; \
@@ -265,12 +265,12 @@ typedef struct {
   uv_buf_t bufsml[4]; \

 #define UV_CONNECT_PRIVATE_FIELDS \
-  struct uv__queue queue; \
+  void* queue[2]; \

 #define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */

 #define UV_UDP_SEND_PRIVATE_FIELDS \
-  struct uv__queue queue; \
+  void* queue[2]; \
   struct sockaddr_storage addr; \
   unsigned int nbufs; \
   uv_buf_t* bufs; \
@@ -286,8 +286,8 @@ typedef struct {
   uv_connect_t *connect_req; \
   uv_shutdown_t *shutdown_req; \
   uv__io_t io_watcher; \
-  struct uv__queue write_queue; \
-  struct uv__queue write_completed_queue; \
+  void* write_queue[2]; \
+  void* write_completed_queue[2]; \
   uv_connection_cb connection_cb; \
   int delayed_error; \
   int accepted_fd; \
@@ -300,8 +300,8 @@ typedef struct {
   uv_alloc_cb alloc_cb; \
   uv_udp_recv_cb recv_cb; \
   uv__io_t io_watcher; \
-  struct uv__queue write_queue; \
-  struct uv__queue write_completed_queue; \
+  void* write_queue[2]; \
+  void* write_completed_queue[2]; \

 #define UV_PIPE_PRIVATE_FIELDS \
   const char* pipe_fname; /* NULL or strdup'ed */
@@ -311,19 +311,19 @@ typedef struct {

 #define UV_PREPARE_PRIVATE_FIELDS \
   uv_prepare_cb prepare_cb; \
-  struct uv__queue queue; \
+  void* queue[2]; \

 #define UV_CHECK_PRIVATE_FIELDS \
   uv_check_cb check_cb; \
-  struct uv__queue queue; \
+  void* queue[2]; \

 #define UV_IDLE_PRIVATE_FIELDS \
   uv_idle_cb idle_cb; \
-  struct uv__queue queue; \
+  void* queue[2]; \

 #define UV_ASYNC_PRIVATE_FIELDS \
   uv_async_cb async_cb; \
-  struct uv__queue queue; \
+  void* queue[2]; \
   int pending; \

 #define UV_TIMER_PRIVATE_FIELDS \
@@ -352,7 +352,7 @@ typedef struct {
   int retcode;

 #define UV_PROCESS_PRIVATE_FIELDS \
-  struct uv__queue queue; \
+  void* queue[2]; \
   int status; \

 #define UV_FS_PRIVATE_FIELDS \
diff --git a/deps/uv/include/uv/win.h b/deps/uv/include/uv/win.h
index 6f8c47298e4..92a95fa15f1 100644
--- a/deps/uv/include/uv/win.h
+++ b/deps/uv/include/uv/win.h
@@ -357,7 +357,7 @@ typedef struct {
   /* Counter to started timer */ \
   uint64_t timer_counter; \
   /* Threadpool */ \
-  struct uv__queue wq; \
+  void* wq[2]; \
   uv_mutex_t wq_mutex; \
   uv_async_t wq_async;

@@ -486,7 +486,7 @@ typedef struct {
     uint32_t payload_remaining; \
     uint64_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
   } ipc_data_frame; \
-  struct uv__queue ipc_xfer_queue; \
+  void* ipc_xfer_queue[2]; \
   int ipc_xfer_queue_length; \
   uv_write_t* non_overlapped_writes_tail; \
   CRITICAL_SECTION readfile_thread_lock; \
diff --git a/deps/uv/src/queue.h b/deps/uv/src/queue.h
index 5f8489e9bc5..ff3540a0a51 100644
--- a/deps/uv/src/queue.h
+++ b/deps/uv/src/queue.h
@@ -18,73 +18,91 @@

 #include <stddef.h>

-#define uv__queue_data(pointer, type, field) \
-  ((type*) ((char*) (pointer) - offsetof(type, field)))
-
-#define uv__queue_foreach(q, h) \
-  for ((q) = (h)->next; (q) != (h); (q) = (q)->next)
-
-static inline void uv__queue_init(struct uv__queue* q) {
-  q->next = q;
-  q->prev = q;
-}
-
-static inline int uv__queue_empty(const struct uv__queue* q) {
-  return q == q->next;
-}
-
-static inline struct uv__queue* uv__queue_head(const struct uv__queue* q) {
-  return q->next;
-}
-
-static inline struct uv__queue* uv__queue_next(const struct uv__queue* q) {
-  return q->next;
-}
-
-static inline void uv__queue_add(struct uv__queue* h, struct uv__queue* n) {
-  h->prev->next = n->next;
-  n->next->prev = h->prev;
-  h->prev = n->prev;
-  h->prev->next = h;
-}
-
-static inline void uv__queue_split(struct uv__queue* h,
-                                   struct uv__queue* q,
-                                   struct uv__queue* n) {
-  n->prev = h->prev;
-  n->prev->next = n;
-  n->next = q;
-  h->prev = q->prev;
-  h->prev->next = h;
-  q->prev = n;
-}
-
-static inline void uv__queue_move(struct uv__queue* h, struct uv__queue* n) {
-  if (uv__queue_empty(h))
-    uv__queue_init(n);
-  else
-    uv__queue_split(h, h->next, n);
-}
-
-static inline void uv__queue_insert_head(struct uv__queue* h,
-                                         struct uv__queue* q) {
-  q->next = h->next;
-  q->prev = h;
-  q->next->prev = q;
-  h->next = q;
-}
-
-static inline void uv__queue_insert_tail(struct uv__queue* h,
-                                         struct uv__queue* q) {
-  q->next = h;
-  q->prev = h->prev;
-  q->prev->next = q;
-  h->prev = q;
-}
-
-static inline void uv__queue_remove(struct uv__queue* q) {
-  q->prev->next = q->next;
-  q->next->prev = q->prev;
-}
+typedef void *QUEUE[2];
+
+/* Private macros. */
+#define QUEUE_NEXT(q)       (*(QUEUE **) &((*(q))[0]))
+#define QUEUE_PREV(q)       (*(QUEUE **) &((*(q))[1]))
+#define QUEUE_PREV_NEXT(q)  (QUEUE_NEXT(QUEUE_PREV(q)))
+#define QUEUE_NEXT_PREV(q)  (QUEUE_PREV(QUEUE_NEXT(q)))
+
+/* Public macros. */
+#define QUEUE_DATA(ptr, type, field) \
+  ((type *) ((char *) (ptr) - offsetof(type, field)))
+
+/* Important note: mutating the list while QUEUE_FOREACH is
+ * iterating over its elements results in undefined behavior.
+ */
+#define QUEUE_FOREACH(q, h) \
+  for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
+
+#define QUEUE_EMPTY(q) \
+  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
+
+#define QUEUE_HEAD(q) \
+  (QUEUE_NEXT(q))
+
+#define QUEUE_INIT(q) \
+  do { \
+    QUEUE_NEXT(q) = (q); \
+    QUEUE_PREV(q) = (q); \
+  } \
+  while (0)
+
+#define QUEUE_ADD(h, n) \
+  do { \
+    QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
+    QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
+    QUEUE_PREV(h) = QUEUE_PREV(n); \
+    QUEUE_PREV_NEXT(h) = (h); \
+  } \
+  while (0)
+
+#define QUEUE_SPLIT(h, q, n) \
+  do { \
+    QUEUE_PREV(n) = QUEUE_PREV(h); \
+    QUEUE_PREV_NEXT(n) = (n); \
+    QUEUE_NEXT(n) = (q); \
+    QUEUE_PREV(h) = QUEUE_PREV(q); \
+    QUEUE_PREV_NEXT(h) = (h); \
+    QUEUE_PREV(q) = (n); \
+  } \
+  while (0)
+
+#define QUEUE_MOVE(h, n) \
+  do { \
+    if (QUEUE_EMPTY(h)) \
+      QUEUE_INIT(n); \
+    else { \
+      QUEUE* q = QUEUE_HEAD(h); \
+      QUEUE_SPLIT(h, q, n); \
+    } \
+  } \
+  while (0)
+
+#define QUEUE_INSERT_HEAD(h, q) \
+  do { \
+    QUEUE_NEXT(q) = QUEUE_NEXT(h); \
+    QUEUE_PREV(q) = (h); \
+    QUEUE_NEXT_PREV(q) = (q); \
+    QUEUE_NEXT(h) = (q); \
+  } \
+  while (0)
+
+#define QUEUE_INSERT_TAIL(h, q) \
+  do { \
+    QUEUE_NEXT(q) = (h); \
+    QUEUE_PREV(q) = QUEUE_PREV(h); \
+    QUEUE_PREV_NEXT(q) = (q); \
+    QUEUE_PREV(h) = (q); \
+  } \
+  while (0)
+
+#define QUEUE_REMOVE(q) \
+  do { \
+    QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
+    QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
+  } \
+  while (0)

 #endif /* QUEUE_H_ */
diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c
index dbef67f2f10..51962bf0021 100644
--- a/deps/uv/src/threadpool.c
+++ b/deps/uv/src/threadpool.c
@@ -37,10 +37,10 @@ static unsigned int slow_io_work_running;
 static unsigned int nthreads;
 static uv_thread_t* threads;
 static uv_thread_t default_threads[4];
-static struct uv__queue exit_message;
-static struct uv__queue wq;
-static struct uv__queue run_slow_work_message;
-static struct uv__queue slow_io_pending_wq;
+static QUEUE exit_message;
+static QUEUE wq;
+static QUEUE run_slow_work_message;
+static QUEUE slow_io_pending_wq;

 static unsigned int slow_work_thread_threshold(void) {
   return (nthreads + 1) / 2;
@@ -56,7 +56,7 @@ static void uv__cancelled(struct uv__work* w) {
  */
 static void worker(void* arg) {
   struct uv__work* w;
-  struct uv__queue* q;
+  QUEUE* q;
   int is_slow_work;

   uv_sem_post((uv_sem_t*) arg);
@@ -68,49 +68,49 @@ static void worker(void* arg) {

    /* Keep waiting while either no work is present or only slow I/O
       and we're at the threshold for that. */
-    while (uv__queue_empty(&wq) ||
-           (uv__queue_head(&wq) == &run_slow_work_message &&
-            uv__queue_next(&run_slow_work_message) == &wq &&
+    while (QUEUE_EMPTY(&wq) ||
+           (QUEUE_HEAD(&wq) == &run_slow_work_message &&
+            QUEUE_NEXT(&run_slow_work_message) == &wq &&
            slow_io_work_running >= slow_work_thread_threshold())) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

-    q = uv__queue_head(&wq);
+    q = QUEUE_HEAD(&wq);
    if (q == &exit_message) {
      uv_cond_signal(&cond);
      uv_mutex_unlock(&mutex);
      break;
    }

-    uv__queue_remove(q);
-    uv__queue_init(q);  /* Signal uv_cancel() that the work req is executing. */
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */

    is_slow_work = 0;
    if (q == &run_slow_work_message) {
      /* If we're at the slow I/O threshold, re-schedule until after all
         other work in the queue is done. */
      if (slow_io_work_running >= slow_work_thread_threshold()) {
-        uv__queue_insert_tail(&wq, q);
+        QUEUE_INSERT_TAIL(&wq, q);
        continue;
      }

      /* If we encountered a request to run slow I/O work but there is none
         to run, that means it's cancelled => Start over. */
-      if (uv__queue_empty(&slow_io_pending_wq))
+      if (QUEUE_EMPTY(&slow_io_pending_wq))
        continue;

      is_slow_work = 1;
      slow_io_work_running++;

-      q = uv__queue_head(&slow_io_pending_wq);
-      uv__queue_remove(q);
-      uv__queue_init(q);
+      q = QUEUE_HEAD(&slow_io_pending_wq);
+      QUEUE_REMOVE(q);
+      QUEUE_INIT(q);

      /* If there is more slow I/O work, schedule it to be run as well. */
-      if (!uv__queue_empty(&slow_io_pending_wq)) {
-        uv__queue_insert_tail(&wq, &run_slow_work_message);
+      if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
+        QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
        if (idle_threads > 0)
          uv_cond_signal(&cond);
      }
@@ -118,13 +118,13 @@ static void worker(void* arg) {

    uv_mutex_unlock(&mutex);

-    w = uv__queue_data(q, struct uv__work, wq);
+    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
-    uv__queue_insert_tail(&w->loop->wq, &w->wq);
+    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);

@@ -139,12 +139,12 @@ static void worker(void* arg) {
 }


-static void post(struct uv__queue* q, enum uv__work_kind kind) {
+static void post(QUEUE* q, enum uv__work_kind kind) {
   uv_mutex_lock(&mutex);
   if (kind == UV__WORK_SLOW_IO) {
     /* Insert into a separate queue. */
-    uv__queue_insert_tail(&slow_io_pending_wq, q);
-    if (!uv__queue_empty(&run_slow_work_message)) {
+    QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
+    if (!QUEUE_EMPTY(&run_slow_work_message)) {
      /* Running slow I/O tasks is already scheduled => Nothing to do here.
         The worker that runs said other task will schedule this one as well. */
      uv_mutex_unlock(&mutex);
@@ -153,7 +153,7 @@ static void post(struct uv__queue* q, enum uv__work_kind kind) {
     q = &run_slow_work_message;
   }

-  uv__queue_insert_tail(&wq, q);
+  QUEUE_INSERT_TAIL(&wq, q);
   if (idle_threads > 0)
     uv_cond_signal(&cond);
   uv_mutex_unlock(&mutex);
@@ -220,9 +220,9 @@ static void init_threads(void) {
   if (uv_mutex_init(&mutex))
     abort();

-  uv__queue_init(&wq);
-  uv__queue_init(&slow_io_pending_wq);
-  uv__queue_init(&run_slow_work_message);
+  QUEUE_INIT(&wq);
+  QUEUE_INIT(&slow_io_pending_wq);
+  QUEUE_INIT(&run_slow_work_message);

   if (uv_sem_init(&sem, 0))
     abort();
@@ -285,9 +285,9 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
   uv_mutex_lock(&mutex);
   uv_mutex_lock(&w->loop->wq_mutex);

-  cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
+  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
   if (cancelled)
-    uv__queue_remove(&w->wq);
+    QUEUE_REMOVE(&w->wq);

   uv_mutex_unlock(&w->loop->wq_mutex);
   uv_mutex_unlock(&mutex);
@@ -297,7 +297,7 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {

   w->work = uv__cancelled;
   uv_mutex_lock(&loop->wq_mutex);
-  uv__queue_insert_tail(&loop->wq, &w->wq);
+  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
   uv_async_send(&loop->wq_async);
   uv_mutex_unlock(&loop->wq_mutex);

@@ -308,21 +308,21 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
 void uv__work_done(uv_async_t* handle) {
   struct uv__work* w;
   uv_loop_t* loop;
-  struct uv__queue* q;
-  struct uv__queue wq;
+  QUEUE* q;
+  QUEUE wq;
   int err;
   int nevents;

   loop = container_of(handle, uv_loop_t, wq_async);
   uv_mutex_lock(&loop->wq_mutex);
-  uv__queue_move(&loop->wq, &wq);
+  QUEUE_MOVE(&loop->wq, &wq);
   uv_mutex_unlock(&loop->wq_mutex);

   nevents = 0;

-  while (!uv__queue_empty(&wq)) {
-    q = uv__queue_head(&wq);
-    uv__queue_remove(q);
+  while (!QUEUE_EMPTY(&wq)) {
+    q = QUEUE_HEAD(&wq);
+    QUEUE_REMOVE(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
diff --git a/deps/uv/src/unix/aix.c b/deps/uv/src/unix/aix.c
index 3af3009a216..f1afbed49ec 100644
--- a/deps/uv/src/unix/aix.c
+++ b/deps/uv/src/unix/aix.c
@@ -136,7 +136,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct pollfd pqry;
   struct pollfd* pe;
   struct poll_ctl pc;
-  struct uv__queue* q;
+  QUEUE* q;
   uv__io_t* w;
   uint64_t base;
   uint64_t diff;
@@ -151,18 +151,18 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int reset_timeout;

   if (loop->nfds == 0) {
-    assert(uv__queue_empty(&loop->watcher_queue));
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
     return;
   }

   lfields = uv__get_internal_fields(loop);

-  while (!uv__queue_empty(&loop->watcher_queue)) {
-    q = uv__queue_head(&loop->watcher_queue);
-    uv__queue_remove(q);
-    uv__queue_init(q);
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);

-    w = uv__queue_data(q, uv__io_t, watcher_queue);
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);
diff --git a/deps/uv/src/unix/async.c b/deps/uv/src/unix/async.c
index 0ff2669e30a..5751b6d02be 100644
--- a/deps/uv/src/unix/async.c
+++ b/deps/uv/src/unix/async.c
@@ -55,7 +55,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
   handle->pending = 0;
   handle->u.fd = 0; /* This will be used as a busy flag. */

-  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
+  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
   uv__handle_start(handle);

   return 0;
@@ -124,7 +124,7 @@ static void uv__async_spin(uv_async_t* handle) {

 void uv__async_close(uv_async_t* handle) {
   uv__async_spin(handle);
-  uv__queue_remove(&handle->queue);
+  QUEUE_REMOVE(&handle->queue);
   uv__handle_stop(handle);
 }

@@ -132,8 +132,8 @@ void uv__async_close(uv_async_t* handle) {
 static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   char buf[1024];
   ssize_t r;
-  struct uv__queue queue;
-  struct uv__queue* q;
+  QUEUE queue;
+  QUEUE* q;
   uv_async_t* h;
   _Atomic int *pending;

@@ -157,13 +157,13 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     abort();
   }

-  uv__queue_move(&loop->async_handles, &queue);
-  while (!uv__queue_empty(&queue)) {
-    q = uv__queue_head(&queue);
-    h = uv__queue_data(q, uv_async_t, queue);
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);

-    uv__queue_remove(q);
-    uv__queue_insert_tail(&loop->async_handles, q);
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    /* Atomically fetch and clear pending flag */
    pending = (_Atomic int*) &h->pending;
@@ -241,8 +241,8 @@ static int uv__async_start(uv_loop_t* loop) {


 void uv__async_stop(uv_loop_t* loop) {
-  struct uv__queue queue;
-  struct uv__queue* q;
+  QUEUE queue;
+  QUEUE* q;
   uv_async_t* h;

   if (loop->async_io_watcher.fd == -1)
@@ -251,13 +251,13 @@ void uv__async_stop(uv_loop_t* loop) {
   /* Make sure no other thread is accessing the async handle fd after the loop
    * cleanup.
    */
-  uv__queue_move(&loop->async_handles, &queue);
-  while (!uv__queue_empty(&queue)) {
-    q = uv__queue_head(&queue);
-    h = uv__queue_data(q, uv_async_t, queue);
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);

-    uv__queue_remove(q);
-    uv__queue_insert_tail(&loop->async_handles, q);
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    uv__async_spin(h);
   }
@@ -275,20 +275,20 @@ void uv__async_stop(uv_loop_t* loop) {


 int uv__async_fork(uv_loop_t* loop) {
-  struct uv__queue queue;
-  struct uv__queue* q;
+  QUEUE queue;
+  QUEUE* q;
   uv_async_t* h;

   if (loop->async_io_watcher.fd == -1) /* never started */
     return 0;

-  uv__queue_move(&loop->async_handles, &queue);
-  while (!uv__queue_empty(&queue)) {
-    q = uv__queue_head(&queue);
-    h = uv__queue_data(q, uv_async_t, queue);
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);

-    uv__queue_remove(q);
-    uv__queue_insert_tail(&loop->async_handles, q);
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);

    /* The state of any thread that set pending is now likely corrupt in this
     * child because the user called fork, so just clear these flags and move
diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c
index 25c5181f370..55aa962787e 100644
--- a/deps/uv/src/unix/core.c
+++ b/deps/uv/src/unix/core.c
@@ -344,7 +344,7 @@ static void uv__finish_close(uv_handle_t* handle) {
   }

   uv__handle_unref(handle);
-  uv__queue_remove(&handle->handle_queue);
+  QUEUE_REMOVE(&handle->handle_queue);

   if (handle->close_cb) {
     handle->close_cb(handle);
@@ -380,7 +380,7 @@ int uv_backend_fd(const uv_loop_t* loop) {
 static int uv__loop_alive(const uv_loop_t* loop) {
   return uv__has_active_handles(loop) ||
          uv__has_active_reqs(loop) ||
-         !uv__queue_empty(&loop->pending_queue) ||
+         !QUEUE_EMPTY(&loop->pending_queue) ||
          loop->closing_handles != NULL;
 }

@@ -389,8 +389,8 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
   if (loop->stop_flag == 0 &&
       /* uv__loop_alive(loop) && */
       (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
-      uv__queue_empty(&loop->pending_queue) &&
-      uv__queue_empty(&loop->idle_handles) &&
+      QUEUE_EMPTY(&loop->pending_queue) &&
+      QUEUE_EMPTY(&loop->idle_handles) &&
       (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
       loop->closing_handles == NULL)
     return uv__next_timeout(loop);
@@ -399,7 +399,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {


 int uv_backend_timeout(const uv_loop_t* loop) {
-  if (uv__queue_empty(&loop->watcher_queue))
+  if (QUEUE_EMPTY(&loop->watcher_queue))
     return uv__backend_timeout(loop);
   /* Need to call uv_run to update the backend fd state. */
   return 0;
@@ -431,8 +431,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {

   while (r != 0 && loop->stop_flag == 0) {
     can_sleep =
-        uv__queue_empty(&loop->pending_queue) &&
-        uv__queue_empty(&loop->idle_handles);
+        QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
@@ -448,7 +447,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
-    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
+    for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
@@ -827,17 +826,17 @@ int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {


 static void uv__run_pending(uv_loop_t* loop) {
-  struct uv__queue* q;
-  struct uv__queue pq;
+  QUEUE* q;
+  QUEUE pq;
   uv__io_t* w;

-  uv__queue_move(&loop->pending_queue, &pq);
+  QUEUE_MOVE(&loop->pending_queue, &pq);

-  while (!uv__queue_empty(&pq)) {
-    q = uv__queue_head(&pq);
-    uv__queue_remove(q);
-    uv__queue_init(q);
-    w = uv__queue_data(q, uv__io_t, pending_queue);
+  while (!QUEUE_EMPTY(&pq)) {
+    q = QUEUE_HEAD(&pq);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
   }
 }
@@ -892,8 +891,8 @@ static void maybe_resize(uv_loop_t* loop, unsigned int len) {
 void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
   assert(cb != NULL);
   assert(fd >= -1);
-  uv__queue_init(&w->pending_queue);
-  uv__queue_init(&w->watcher_queue);
+  QUEUE_INIT(&w->pending_queue);
+  QUEUE_INIT(&w->watcher_queue);
   w->cb = cb;
   w->fd = fd;
   w->events = 0;
@@ -919,8 +918,8 @@ void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     return;
 #endif

-  if (uv__queue_empty(&w->watcher_queue))
-    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
+  if (QUEUE_EMPTY(&w->watcher_queue))
+    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

   if (loop->watchers[w->fd] == NULL) {
     loop->watchers[w->fd] = w;
@@ -945,8 +944,8 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   w->pevents &= ~events;

   if (w->pevents == 0) {
-    uv__queue_remove(&w->watcher_queue);
-    uv__queue_init(&w->watcher_queue);
+    QUEUE_REMOVE(&w->watcher_queue);
+    QUEUE_INIT(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
@@ -955,14 +954,14 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
      loop->nfds--;
    }
   }
-  else if (uv__queue_empty(&w->watcher_queue))
-    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
+  else if (QUEUE_EMPTY(&w->watcher_queue))
+    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
 }


 void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
   uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
-  uv__queue_remove(&w->pending_queue);
+  QUEUE_REMOVE(&w->pending_queue);

   /* Remove stale events for this file descriptor */
   if (w->fd != -1)
@@ -971,8 +970,8 @@ void uv__io_close(uv_loop_t* loop, uv__io_t* w) {


 void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
-  if (uv__queue_empty(&w->pending_queue))
-    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
+  if (QUEUE_EMPTY(&w->pending_queue))
+    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
 }


diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c
index 6b051c124f2..00d385c24b7 100644
--- a/deps/uv/src/unix/fs.c
+++ b/deps/uv/src/unix/fs.c
@@ -62,6 +62,7 @@

 #if defined(__linux__)
 # include <sys/sendfile.h>
+# include <sys/utsname.h>
 #endif

 #if defined(__sun)
@@ -903,6 +904,31 @@ out:


 #ifdef __linux__
+static unsigned uv__kernel_version(void) {
+  static _Atomic unsigned cached_version;
+  struct utsname u;
+  unsigned version;
+  unsigned major;
+  unsigned minor;
+  unsigned patch;
+
+  version = atomic_load_explicit(&cached_version, memory_order_relaxed);
+  if (version != 0)
+    return version;
+
+  if (-1 == uname(&u))
+    return 0;
+
+  if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
+    return 0;
+
+  version = major * 65536 + minor * 256 + patch;
+  atomic_store_explicit(&cached_version, version, memory_order_relaxed);
+
+  return version;
+}
+
+
 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
  * in copy_file_range() when it shouldn't. There is no workaround except to
  * fall back to a regular copy.
@@ -1905,9 +1931,6 @@ int uv_fs_link(uv_loop_t* loop,
               uv_fs_cb cb) {
   INIT(LINK);
   PATH2;
-  if (cb != NULL)
-    if (uv__iou_fs_link(loop, req))
-      return 0;
   POST;
 }

@@ -1920,9 +1943,6 @@ int uv_fs_mkdir(uv_loop_t* loop,
   INIT(MKDIR);
   PATH;
   req->mode = mode;
-  if (cb != NULL)
-    if (uv__iou_fs_mkdir(loop, req))
-      return 0;
   POST;
 }

@@ -2074,9 +2094,6 @@ int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_cb cb) {
   INIT(RENAME);
   PATH2;
-  if (cb != NULL)
-    if (uv__iou_fs_rename(loop, req))
-      return 0;
   POST;
 }

@@ -2123,9 +2140,6 @@ int uv_fs_symlink(uv_loop_t* loop,
   INIT(SYMLINK);
   PATH2;
   req->flags = flags;
-  if (cb != NULL)
-    if (uv__iou_fs_symlink(loop, req))
-      return 0;
   POST;
 }

@@ -2133,9 +2147,6 @@ int uv_fs_symlink(uv_loop_t* loop,
 int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
   INIT(UNLINK);
   PATH;
-  if (cb != NULL)
-    if (uv__iou_fs_unlink(loop, req))
-      return 0;
   POST;
 }

diff --git a/deps/uv/src/unix/fsevents.c b/deps/uv/src/unix/fsevents.c
index df703f3635f..0535b4547aa 100644
--- a/deps/uv/src/unix/fsevents.c
+++ b/deps/uv/src/unix/fsevents.c
@@ -80,13 +80,13 @@ enum uv__cf_loop_signal_type_e {
 typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;

 struct uv__cf_loop_signal_s {
-  struct uv__queue member;
+  QUEUE member;
   uv_fs_event_t* handle;
   uv__cf_loop_signal_type_t type;
 };

 struct uv__fsevents_event_s {
-  struct uv__queue member;
+  QUEUE member;
   int events;
   char path[1];
 };
@@ -98,7 +98,7 @@ struct uv__cf_loop_state_s {
   FSEventStreamRef fsevent_stream;
   uv_sem_t fsevent_sem;
   uv_mutex_t fsevent_mutex;
-  struct uv__queue fsevent_handles;
+  void* fsevent_handles[2];
   unsigned int fsevent_handle_count;
 };

@@ -150,22 +150,22 @@ static void (*pFSEventStreamStop)(FSEventStreamRef);

 #define UV__FSEVENTS_PROCESS(handle, block) \
   do { \
-    struct uv__queue events; \
-    struct uv__queue* q; \
+    QUEUE events; \
+    QUEUE* q; \
    uv__fsevents_event_t* event; \
    int err; \
    uv_mutex_lock(&(handle)->cf_mutex); \
    /* Split-off all events and empty original queue */ \
-    uv__queue_move(&(handle)->cf_events, &events); \
+    QUEUE_MOVE(&(handle)->cf_events, &events); \
    /* Get error (if any) and zero original one */ \
    err = (handle)->cf_error; \
    (handle)->cf_error = 0; \
    uv_mutex_unlock(&(handle)->cf_mutex); \
    /* Loop through events, deallocating each after processing */ \
-    while (!uv__queue_empty(&events)) { \
-      q = uv__queue_head(&events); \
-      event = uv__queue_data(q, uv__fsevents_event_t, member); \
-      uv__queue_remove(q); \
+    while (!QUEUE_EMPTY(&events)) { \
+      q = QUEUE_HEAD(&events); \
+      event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
+      QUEUE_REMOVE(q); \
      /* NOTE: Checking uv__is_active() is required here, because handle \
       * callback may close handle and invoking it after it will lead to \
       * incorrect behaviour */ \
@@ -193,14 +193,14 @@ static void uv__fsevents_cb(uv_async_t* cb) {

 /* Runs in CF thread, pushed event into handle's event list */
 static void uv__fsevents_push_event(uv_fs_event_t* handle,
-                                    struct uv__queue* events,
+                                    QUEUE* events,
                                     int err) {
   assert(events != NULL || err != 0);
   uv_mutex_lock(&handle->cf_mutex);

   /* Concatenate two queues */
   if (events != NULL)
-    uv__queue_add(&handle->cf_events, events);
+    QUEUE_ADD(&handle->cf_events, events);

   /* Propagate error */
   if (err != 0)
@@ -224,12 +224,12 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
   char* path;
   char* pos;
   uv_fs_event_t* handle;
-  struct uv__queue* q;
+  QUEUE* q;
   uv_loop_t* loop;
   uv__cf_loop_state_t* state;
   uv__fsevents_event_t* event;
   FSEventStreamEventFlags flags;
-  struct uv__queue head;
+  QUEUE head;

   loop = info;
   state = loop->cf_state;
@@ -238,9 +238,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,

   /* For each handle */
   uv_mutex_lock(&state->fsevent_mutex);
-  uv__queue_foreach(q, &state->fsevent_handles) {
-    handle = uv__queue_data(q, uv_fs_event_t, cf_member);
-    uv__queue_init(&head);
+  QUEUE_FOREACH(q, &state->fsevent_handles) {
+    handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+    QUEUE_INIT(&head);

    /* Process and filter out events */
    for (i = 0; i < numEvents; i++) {
@@ -318,10 +318,10 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
        event->events = UV_CHANGE;
      }

-      uv__queue_insert_tail(&head, &event->member);
+      QUEUE_INSERT_TAIL(&head, &event->member);
    }

-    if (!uv__queue_empty(&head))
+    if (!QUEUE_EMPTY(&head))
      uv__fsevents_push_event(handle, &head, 0);
   }
   uv_mutex_unlock(&state->fsevent_mutex);
@@ -403,7 +403,7 @@ static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
 static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
                                     uv_loop_t* loop,
                                     uv__cf_loop_signal_type_t type) {
-  struct uv__queue* q;
+  QUEUE* q;
   uv_fs_event_t* curr;
   CFArrayRef cf_paths;
   CFStringRef* paths;
@@ -446,9 +446,9 @@ static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,

    q = &state->fsevent_handles;
    for (; i < path_count; i++) {
-      q = uv__queue_next(q);
+      q = QUEUE_NEXT(q);
      assert(q != &state->fsevent_handles);
-      curr = uv__queue_data(q, uv_fs_event_t, cf_member);
+      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);

      assert(curr->realpath != NULL);
      paths[i] =
@@ -486,8 +486,8 @@ final:

   /* Broadcast error to all handles */
   uv_mutex_lock(&state->fsevent_mutex);
-  uv__queue_foreach(q, &state->fsevent_handles) {
-    curr = uv__queue_data(q, uv_fs_event_t, cf_member);
+  QUEUE_FOREACH(q, &state->fsevent_handles) {
+    curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
    uv__fsevents_push_event(curr, NULL, err);
   }
   uv_mutex_unlock(&state->fsevent_mutex);
@@ -606,7 +606,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
   if (err)
     goto fail_sem_init;

-  uv__queue_init(&loop->cf_signals);
+  QUEUE_INIT(&loop->cf_signals);

   err = uv_sem_init(&state->fsevent_sem, 0);
   if (err)
@@ -616,7 +616,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
   if (err)
     goto fail_fsevent_mutex_init;

-  uv__queue_init(&state->fsevent_handles);
+  QUEUE_INIT(&state->fsevent_handles);
   state->fsevent_need_reschedule = 0;
   state->fsevent_handle_count = 0;

@@ -675,7 +675,7 @@ fail_mutex_init:
 void uv__fsevents_loop_delete(uv_loop_t* loop) {
   uv__cf_loop_signal_t* s;
   uv__cf_loop_state_t* state;
-  struct uv__queue* q;
+  QUEUE* q;

   if (loop->cf_state == NULL)
     return;
@@ -688,10 +688,10 @@ void uv__fsevents_loop_delete(uv_loop_t* loop) {
   uv_mutex_destroy(&loop->cf_mutex);

   /* Free any remaining data */
-  while (!uv__queue_empty(&loop->cf_signals)) {
-    q = uv__queue_head(&loop->cf_signals);
-    s = uv__queue_data(q, uv__cf_loop_signal_t, member);
-    uv__queue_remove(q);
+  while (!QUEUE_EMPTY(&loop->cf_signals)) {
+    q = QUEUE_HEAD(&loop->cf_signals);
+    s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
+    QUEUE_REMOVE(q);
    uv__free(s);
   }

@@ -735,22 +735,22 @@ static void* uv__cf_loop_runner(void* arg) {
 static void uv__cf_loop_cb(void* arg) {
   uv_loop_t* loop;
   uv__cf_loop_state_t* state;
-  struct uv__queue* item;
-  struct uv__queue split_head;
+  QUEUE* item;
+  QUEUE split_head;
   uv__cf_loop_signal_t* s;

   loop = arg;
   state = loop->cf_state;

   uv_mutex_lock(&loop->cf_mutex);
-  uv__queue_move(&loop->cf_signals, &split_head);
+  QUEUE_MOVE(&loop->cf_signals, &split_head);
   uv_mutex_unlock(&loop->cf_mutex);

-  while (!uv__queue_empty(&split_head)) {
-    item = uv__queue_head(&split_head);
-    uv__queue_remove(item);
+  while (!QUEUE_EMPTY(&split_head)) {
+    item = QUEUE_HEAD(&split_head);
+    QUEUE_REMOVE(item);

-    s = uv__queue_data(item, uv__cf_loop_signal_t, member);
+    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);

    /* This was a termination signal */
    if (s->handle == NULL)
@@ -778,7 +778,7 @@ int uv__cf_loop_signal(uv_loop_t* loop,
   item->type = type;

   uv_mutex_lock(&loop->cf_mutex);
-  uv__queue_insert_tail(&loop->cf_signals, &item->member);
+  QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);

   state = loop->cf_state;
   assert(state != NULL);
@@ -807,7 +807,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
   handle->realpath_len = strlen(handle->realpath);

   /* Initialize event queue */
-  uv__queue_init(&handle->cf_events);
+  QUEUE_INIT(&handle->cf_events);
   handle->cf_error = 0;

   /*
@@ -832,7 +832,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
   /* Insert handle into the list */
   state = handle->loop->cf_state;
   uv_mutex_lock(&state->fsevent_mutex);
-  uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member);
+  QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
   state->fsevent_handle_count++;
   state->fsevent_need_reschedule = 1;
   uv_mutex_unlock(&state->fsevent_mutex);
@@ -872,7 +872,7 @@ int uv__fsevents_close(uv_fs_event_t* handle) {
   /* Remove handle from the list */
   state = handle->loop->cf_state;
   uv_mutex_lock(&state->fsevent_mutex);
-  uv__queue_remove(&handle->cf_member);
+  QUEUE_REMOVE(&handle->cf_member);
   state->fsevent_handle_count--;
   state->fsevent_need_reschedule = 1;
   uv_mutex_unlock(&state->fsevent_mutex);
| 1313 | diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h | ||
| 1314 | index fe588513603..6c5822e6a0d 100644 | ||
| 1315 | --- a/deps/uv/src/unix/internal.h | ||
| 1316 | +++ b/deps/uv/src/unix/internal.h | ||
| 1317 | @@ -335,30 +335,20 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req); | ||
| 1318 | int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||
| 1319 | uv_fs_t* req, | ||
| 1320 | uint32_t fsync_flags); | ||
| 1321 | -int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req); | ||
| 1322 | -int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req); | ||
| 1323 | int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req); | ||
| 1324 | int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||
| 1325 | uv_fs_t* req, | ||
| 1326 | int is_read); | ||
| 1327 | -int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req); | ||
| 1328 | int uv__iou_fs_statx(uv_loop_t* loop, | ||
| 1329 | uv_fs_t* req, | ||
| 1330 | int is_fstat, | ||
| 1331 | int is_lstat); | ||
| 1332 | -int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req); | ||
| 1333 | -int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req); | ||
| 1334 | #else | ||
| 1335 | #define uv__iou_fs_close(loop, req) 0 | ||
| 1336 | #define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0 | ||
| 1337 | -#define uv__iou_fs_link(loop, req) 0 | ||
| 1338 | -#define uv__iou_fs_mkdir(loop, req) 0 | ||
| 1339 | #define uv__iou_fs_open(loop, req) 0 | ||
| 1340 | #define uv__iou_fs_read_or_write(loop, req, is_read) 0 | ||
| 1341 | -#define uv__iou_fs_rename(loop, req) 0 | ||
| 1342 | #define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0 | ||
| 1343 | -#define uv__iou_fs_symlink(loop, req) 0 | ||
| 1344 | -#define uv__iou_fs_unlink(loop, req) 0 | ||
| 1345 | #endif | ||
| 1346 | |||
| 1347 | #if defined(__APPLE__) | ||
| 1348 | @@ -439,7 +429,6 @@ int uv__statx(int dirfd, | ||
| 1349 | struct uv__statx* statxbuf); | ||
| 1350 | void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf); | ||
| 1351 | ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags); | ||
| 1352 | -unsigned uv__kernel_version(void); | ||
| 1353 | #endif | ||
| 1354 | |||
| 1355 | typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); | ||
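The #else branch above defines the uv__iou_fs_*() helpers away as macros that expand to 0. The convention is that each helper returns 1 when the request was queued on io_uring and 0 when the caller should fall back to the threadpool, so compiled-out platforms take the fallback path with no further #ifdefs at the call sites. A sketch of that dispatch shape (uv__fs_work_threadpool() is a hypothetical stand-in for libuv's real threadpool submission path):

    /* Returns 1 if the request went to io_uring, 0 if the caller must
     * fall back.  On non-Linux builds the macro version is always 0. */
    static int try_iou_open(uv_loop_t* loop, uv_fs_t* req) {
      return uv__iou_fs_open(loop, req);  /* may also return 0 at runtime */
    }

    static void fs_open_dispatch(uv_loop_t* loop, uv_fs_t* req) {
      if (!try_iou_open(loop, req))
        uv__fs_work_threadpool(loop, req);  /* hypothetical fallback */
    }

Note that even on Linux a 0 return is normal, for example when the submission queue is full, so the fallback must always be available.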
| 1356 | diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c | ||
| 1357 | index b78242d3be4..82916d65933 100644 | ||
| 1358 | --- a/deps/uv/src/unix/kqueue.c | ||
| 1359 | +++ b/deps/uv/src/unix/kqueue.c | ||
| 1360 | @@ -133,7 +133,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1361 | struct timespec spec; | ||
| 1362 | unsigned int nevents; | ||
| 1363 | unsigned int revents; | ||
| 1364 | - struct uv__queue* q; | ||
| 1365 | + QUEUE* q; | ||
| 1366 | uv__io_t* w; | ||
| 1367 | uv_process_t* process; | ||
| 1368 | sigset_t* pset; | ||
| 1369 | @@ -152,19 +152,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1370 | int reset_timeout; | ||
| 1371 | |||
| 1372 | if (loop->nfds == 0) { | ||
| 1373 | - assert(uv__queue_empty(&loop->watcher_queue)); | ||
| 1374 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 1375 | return; | ||
| 1376 | } | ||
| 1377 | |||
| 1378 | lfields = uv__get_internal_fields(loop); | ||
| 1379 | nevents = 0; | ||
| 1380 | |||
| 1381 | - while (!uv__queue_empty(&loop->watcher_queue)) { | ||
| 1382 | - q = uv__queue_head(&loop->watcher_queue); | ||
| 1383 | - uv__queue_remove(q); | ||
| 1384 | - uv__queue_init(q); | ||
| 1385 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1386 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 1387 | + QUEUE_REMOVE(q); | ||
| 1388 | + QUEUE_INIT(q); | ||
| 1389 | |||
| 1390 | - w = uv__queue_data(q, uv__io_t, watcher_queue); | ||
| 1391 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 1392 | assert(w->pevents != 0); | ||
| 1393 | assert(w->fd >= 0); | ||
| 1394 | assert(w->fd < (int) loop->nwatchers); | ||
| 1395 | @@ -307,8 +307,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1396 | |||
| 1397 | /* Handle kevent NOTE_EXIT results */ | ||
| 1398 | if (ev->filter == EVFILT_PROC) { | ||
| 1399 | - uv__queue_foreach(q, &loop->process_handles) { | ||
| 1400 | - process = uv__queue_data(q, uv_process_t, queue); | ||
| 1401 | + QUEUE_FOREACH(q, &loop->process_handles) { | ||
| 1402 | + process = QUEUE_DATA(q, uv_process_t, queue); | ||
| 1403 | if (process->pid == fd) { | ||
| 1404 | process->flags |= UV_HANDLE_REAP; | ||
| 1405 | loop->flags |= UV_LOOP_REAP_CHILDREN; | ||
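Nearly every hunk in this patch is a mechanical rename between the old QUEUE macros and the newer struct uv__queue functions; both implement the same intrusive circular doubly-linked list, where the link lives inside the owning struct and QUEUE_DATA()/uv__queue_data() recover the container with a container_of-style offset. A simplified, self-contained sketch of the idiom (libuv's QUEUE is really a void*[2]; a named struct is used here for readability):

    #include <stddef.h>
    #include <stdio.h>

    struct queue { struct queue* next; struct queue* prev; };

    /* container_of: step back from the embedded link to its owner. */
    #define queue_data(ptr, type, field) \
      ((type*) ((char*) (ptr) - offsetof(type, field)))

    static void queue_init(struct queue* q) { q->next = q; q->prev = q; }
    static int queue_empty(const struct queue* q) { return q == q->next; }

    static void queue_insert_tail(struct queue* h, struct queue* q) {
      q->next = h;
      q->prev = h->prev;
      h->prev->next = q;
      h->prev = q;
    }

    static void queue_remove(struct queue* q) {
      q->prev->next = q->next;
      q->next->prev = q->prev;
    }

    struct handle { int id; struct queue member; };

    int main(void) {
      struct queue head;
      struct handle a = { 1, { 0, 0 } };
      struct handle b = { 2, { 0, 0 } };

      queue_init(&head);
      queue_insert_tail(&head, &a.member);
      queue_insert_tail(&head, &b.member);

      while (!queue_empty(&head)) {
        struct queue* q = head.next;                        /* list head */
        struct handle* h = queue_data(q, struct handle, member);
        queue_remove(q);
        printf("handle %d\n", h->id);                       /* 1, then 2 */
      }
      return 0;
    }

Because the node is embedded, inserts and removals allocate nothing, which is why the event loop can shuffle handles between queues in hot paths for free.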
| 1406 | diff --git a/deps/uv/src/unix/linux.c b/deps/uv/src/unix/linux.c | ||
| 1407 | index 48b9c2c43e1..5f84ad0eea3 100644 | ||
| 1408 | --- a/deps/uv/src/unix/linux.c | ||
| 1409 | +++ b/deps/uv/src/unix/linux.c | ||
| 1410 | @@ -48,7 +48,6 @@ | ||
| 1411 | #include <sys/sysinfo.h> | ||
| 1412 | #include <sys/sysmacros.h> | ||
| 1413 | #include <sys/types.h> | ||
| 1414 | -#include <sys/utsname.h> | ||
| 1415 | #include <time.h> | ||
| 1416 | #include <unistd.h> | ||
| 1417 | |||
| 1418 | @@ -151,11 +150,6 @@ enum { | ||
| 1419 | UV__IORING_OP_CLOSE = 19, | ||
| 1420 | UV__IORING_OP_STATX = 21, | ||
| 1421 | UV__IORING_OP_EPOLL_CTL = 29, | ||
| 1422 | - UV__IORING_OP_RENAMEAT = 35, | ||
| 1423 | - UV__IORING_OP_UNLINKAT = 36, | ||
| 1424 | - UV__IORING_OP_MKDIRAT = 37, | ||
| 1425 | - UV__IORING_OP_SYMLINKAT = 38, | ||
| 1426 | - UV__IORING_OP_LINKAT = 39, | ||
| 1427 | }; | ||
| 1428 | |||
| 1429 | enum { | ||
| 1430 | @@ -168,10 +162,6 @@ enum { | ||
| 1431 | UV__IORING_SQ_CQ_OVERFLOW = 2u, | ||
| 1432 | }; | ||
| 1433 | |||
| 1434 | -enum { | ||
| 1435 | - UV__MKDIRAT_SYMLINKAT_LINKAT = 1u, | ||
| 1436 | -}; | ||
| 1437 | - | ||
| 1438 | struct uv__io_cqring_offsets { | ||
| 1439 | uint32_t head; | ||
| 1440 | uint32_t tail; | ||
| 1441 | @@ -267,7 +257,7 @@ STATIC_ASSERT(EPOLL_CTL_MOD < 4); | ||
| 1442 | |||
| 1443 | struct watcher_list { | ||
| 1444 | RB_ENTRY(watcher_list) entry; | ||
| 1445 | - struct uv__queue watchers; | ||
| 1446 | + QUEUE watchers; | ||
| 1447 | int iterating; | ||
| 1448 | char* path; | ||
| 1449 | int wd; | ||
| 1450 | @@ -310,31 +300,6 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) { | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | |||
| 1454 | -unsigned uv__kernel_version(void) { | ||
| 1455 | - static _Atomic unsigned cached_version; | ||
| 1456 | - struct utsname u; | ||
| 1457 | - unsigned version; | ||
| 1458 | - unsigned major; | ||
| 1459 | - unsigned minor; | ||
| 1460 | - unsigned patch; | ||
| 1461 | - | ||
| 1462 | - version = atomic_load_explicit(&cached_version, memory_order_relaxed); | ||
| 1463 | - if (version != 0) | ||
| 1464 | - return version; | ||
| 1465 | - | ||
| 1466 | - if (-1 == uname(&u)) | ||
| 1467 | - return 0; | ||
| 1468 | - | ||
| 1469 | - if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch)) | ||
| 1470 | - return 0; | ||
| 1471 | - | ||
| 1472 | - version = major * 65536 + minor * 256 + patch; | ||
| 1473 | - atomic_store_explicit(&cached_version, version, memory_order_relaxed); | ||
| 1474 | - | ||
| 1475 | - return version; | ||
| 1476 | -} | ||
| 1477 | - | ||
| 1478 | - | ||
| 1479 | ssize_t | ||
| 1480 | uv__fs_copy_file_range(int fd_in, | ||
| 1481 | off_t* off_in, | ||
| 1482 | @@ -420,9 +385,6 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) { | ||
| 1483 | |||
| 1484 | |||
| 1485 | static int uv__use_io_uring(void) { | ||
| 1486 | -#if defined(__ANDROID_API__) | ||
| 1487 | - return 0; /* Possibly available but blocked by seccomp. */ | ||
| 1488 | -#else | ||
| 1489 | /* Ternary: unknown=0, yes=1, no=-1 */ | ||
| 1490 | static _Atomic int use_io_uring; | ||
| 1491 | char* val; | ||
| 1492 | @@ -437,7 +399,6 @@ static int uv__use_io_uring(void) { | ||
| 1493 | } | ||
| 1494 | |||
| 1495 | return use > 0; | ||
| 1496 | -#endif | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | |||
| 1500 | @@ -542,10 +503,6 @@ static void uv__iou_init(int epollfd, | ||
| 1501 | iou->sqelen = sqelen; | ||
| 1502 | iou->ringfd = ringfd; | ||
| 1503 | iou->in_flight = 0; | ||
| 1504 | - iou->flags = 0; | ||
| 1505 | - | ||
| 1506 | - if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00) | ||
| 1507 | - iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT; | ||
| 1508 | |||
| 1509 | for (i = 0; i <= iou->sqmask; i++) | ||
| 1510 | iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */ | ||
| 1511 | @@ -727,7 +684,7 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, | ||
| 1512 | req->work_req.loop = loop; | ||
| 1513 | req->work_req.work = NULL; | ||
| 1514 | req->work_req.done = NULL; | ||
| 1515 | - uv__queue_init(&req->work_req.wq); | ||
| 1516 | + QUEUE_INIT(&req->work_req.wq); | ||
| 1517 | |||
| 1518 | uv__req_register(loop, req); | ||
| 1519 | iou->in_flight++; | ||
| 1520 | @@ -757,17 +714,6 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1521 | struct uv__io_uring_sqe* sqe; | ||
| 1522 | struct uv__iou* iou; | ||
| 1523 | |||
| 1524 | - /* Work around a poorly understood bug in older kernels where closing a file | ||
| 1525 | - * descriptor pointing to /foo/bar results in ETXTBSY errors when trying to | ||
| 1526 | - * execve("/foo/bar") later on. The bug seems to have been fixed somewhere | ||
| 1527 | - * between 5.15.85 and 5.15.90. I couldn't pinpoint the responsible commit | ||
| 1528 | - * but good candidates are the several data race fixes. Interestingly, it | ||
| 1529 | - * seems to manifest only when running under Docker so the possibility of | ||
| 1530 | - * a Docker bug can't be completely ruled out either. Yay, computers. | ||
| 1531 | - */ | ||
| 1532 | - if (uv__kernel_version() < /* 5.15.90 */ 0x050F5A) | ||
| 1533 | - return 0; | ||
| 1534 | - | ||
| 1535 | iou = &uv__get_internal_fields(loop)->iou; | ||
| 1536 | |||
| 1537 | sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1538 | @@ -808,55 +754,6 @@ int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||
| 1539 | } | ||
| 1540 | |||
| 1541 | |||
| 1542 | -int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1543 | - struct uv__io_uring_sqe* sqe; | ||
| 1544 | - struct uv__iou* iou; | ||
| 1545 | - | ||
| 1546 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1547 | - | ||
| 1548 | - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||
| 1549 | - return 0; | ||
| 1550 | - | ||
| 1551 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1552 | - if (sqe == NULL) | ||
| 1553 | - return 0; | ||
| 1554 | - | ||
| 1555 | - sqe->addr = (uintptr_t) req->path; | ||
| 1556 | - sqe->fd = AT_FDCWD; | ||
| 1557 | - sqe->addr2 = (uintptr_t) req->new_path; | ||
| 1558 | - sqe->len = AT_FDCWD; | ||
| 1559 | - sqe->opcode = UV__IORING_OP_LINKAT; | ||
| 1560 | - | ||
| 1561 | - uv__iou_submit(iou); | ||
| 1562 | - | ||
| 1563 | - return 1; | ||
| 1564 | -} | ||
| 1565 | - | ||
| 1566 | - | ||
| 1567 | -int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1568 | - struct uv__io_uring_sqe* sqe; | ||
| 1569 | - struct uv__iou* iou; | ||
| 1570 | - | ||
| 1571 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1572 | - | ||
| 1573 | - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||
| 1574 | - return 0; | ||
| 1575 | - | ||
| 1576 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1577 | - if (sqe == NULL) | ||
| 1578 | - return 0; | ||
| 1579 | - | ||
| 1580 | - sqe->addr = (uintptr_t) req->path; | ||
| 1581 | - sqe->fd = AT_FDCWD; | ||
| 1582 | - sqe->len = req->mode; | ||
| 1583 | - sqe->opcode = UV__IORING_OP_MKDIRAT; | ||
| 1584 | - | ||
| 1585 | - uv__iou_submit(iou); | ||
| 1586 | - | ||
| 1587 | - return 1; | ||
| 1588 | -} | ||
| 1589 | - | ||
| 1590 | - | ||
| 1591 | int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1592 | struct uv__io_uring_sqe* sqe; | ||
| 1593 | struct uv__iou* iou; | ||
| 1594 | @@ -879,86 +776,16 @@ int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1595 | } | ||
| 1596 | |||
| 1597 | |||
| 1598 | -int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1599 | - struct uv__io_uring_sqe* sqe; | ||
| 1600 | - struct uv__iou* iou; | ||
| 1601 | - | ||
| 1602 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1603 | - | ||
| 1604 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1605 | - if (sqe == NULL) | ||
| 1606 | - return 0; | ||
| 1607 | - | ||
| 1608 | - sqe->addr = (uintptr_t) req->path; | ||
| 1609 | - sqe->fd = AT_FDCWD; | ||
| 1610 | - sqe->addr2 = (uintptr_t) req->new_path; | ||
| 1611 | - sqe->len = AT_FDCWD; | ||
| 1612 | - sqe->opcode = UV__IORING_OP_RENAMEAT; | ||
| 1613 | - | ||
| 1614 | - uv__iou_submit(iou); | ||
| 1615 | - | ||
| 1616 | - return 1; | ||
| 1617 | -} | ||
| 1618 | - | ||
| 1619 | - | ||
| 1620 | -int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1621 | - struct uv__io_uring_sqe* sqe; | ||
| 1622 | - struct uv__iou* iou; | ||
| 1623 | - | ||
| 1624 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1625 | - | ||
| 1626 | - if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||
| 1627 | - return 0; | ||
| 1628 | - | ||
| 1629 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1630 | - if (sqe == NULL) | ||
| 1631 | - return 0; | ||
| 1632 | - | ||
| 1633 | - sqe->addr = (uintptr_t) req->path; | ||
| 1634 | - sqe->fd = AT_FDCWD; | ||
| 1635 | - sqe->addr2 = (uintptr_t) req->new_path; | ||
| 1636 | - sqe->opcode = UV__IORING_OP_SYMLINKAT; | ||
| 1637 | - | ||
| 1638 | - uv__iou_submit(iou); | ||
| 1639 | - | ||
| 1640 | - return 1; | ||
| 1641 | -} | ||
| 1642 | - | ||
| 1643 | - | ||
| 1644 | -int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) { | ||
| 1645 | - struct uv__io_uring_sqe* sqe; | ||
| 1646 | - struct uv__iou* iou; | ||
| 1647 | - | ||
| 1648 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1649 | - | ||
| 1650 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1651 | - if (sqe == NULL) | ||
| 1652 | - return 0; | ||
| 1653 | - | ||
| 1654 | - sqe->addr = (uintptr_t) req->path; | ||
| 1655 | - sqe->fd = AT_FDCWD; | ||
| 1656 | - sqe->opcode = UV__IORING_OP_UNLINKAT; | ||
| 1657 | - | ||
| 1658 | - uv__iou_submit(iou); | ||
| 1659 | - | ||
| 1660 | - return 1; | ||
| 1661 | -} | ||
| 1662 | - | ||
| 1663 | - | ||
| 1664 | int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||
| 1665 | uv_fs_t* req, | ||
| 1666 | int is_read) { | ||
| 1667 | struct uv__io_uring_sqe* sqe; | ||
| 1668 | struct uv__iou* iou; | ||
| 1669 | |||
| 1670 | - /* If iovcnt is greater than IOV_MAX, cap it to IOV_MAX on reads and fallback | ||
| 1671 | - * to the threadpool on writes */ | ||
| 1672 | - if (req->nbufs > IOV_MAX) { | ||
| 1673 | - if (is_read) | ||
| 1674 | - req->nbufs = IOV_MAX; | ||
| 1675 | - else | ||
| 1676 | - return 0; | ||
| 1677 | - } | ||
| 1678 | + /* For the moment, if iovcnt is greater than IOV_MAX, fallback to the | ||
| 1679 | + * threadpool. In the future we might take advantage of IOSQE_IO_LINK. */ | ||
| 1680 | + if (req->nbufs > IOV_MAX) | ||
| 1681 | + return 0; | ||
| 1682 | |||
| 1683 | iou = &uv__get_internal_fields(loop)->iou; | ||
| 1684 | |||
| 1685 | @@ -1265,7 +1092,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1686 | struct uv__iou* ctl; | ||
| 1687 | struct uv__iou* iou; | ||
| 1688 | int real_timeout; | ||
| 1689 | - struct uv__queue* q; | ||
| 1690 | + QUEUE* q; | ||
| 1691 | uv__io_t* w; | ||
| 1692 | sigset_t* sigmask; | ||
| 1693 | sigset_t sigset; | ||
| 1694 | @@ -1311,11 +1138,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1695 | |||
| 1696 | memset(&e, 0, sizeof(e)); | ||
| 1697 | |||
| 1698 | - while (!uv__queue_empty(&loop->watcher_queue)) { | ||
| 1699 | - q = uv__queue_head(&loop->watcher_queue); | ||
| 1700 | - w = uv__queue_data(q, uv__io_t, watcher_queue); | ||
| 1701 | - uv__queue_remove(q); | ||
| 1702 | - uv__queue_init(q); | ||
| 1703 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1704 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 1705 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 1706 | + QUEUE_REMOVE(q); | ||
| 1707 | + QUEUE_INIT(q); | ||
| 1708 | |||
| 1709 | op = EPOLL_CTL_MOD; | ||
| 1710 | if (w->events == 0) | ||
| 1711 | @@ -2272,8 +2099,8 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) { | ||
| 1712 | struct watcher_list* tmp_watcher_list_iter; | ||
| 1713 | struct watcher_list* watcher_list; | ||
| 1714 | struct watcher_list tmp_watcher_list; | ||
| 1715 | - struct uv__queue queue; | ||
| 1716 | - struct uv__queue* q; | ||
| 1717 | + QUEUE queue; | ||
| 1718 | + QUEUE* q; | ||
| 1719 | uv_fs_event_t* handle; | ||
| 1720 | char* tmp_path; | ||
| 1721 | |||
| 1722 | @@ -2285,41 +2112,41 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) { | ||
| 1723 | */ | ||
| 1724 | loop->inotify_watchers = root; | ||
| 1725 | |||
| 1726 | - uv__queue_init(&tmp_watcher_list.watchers); | ||
| 1727 | + QUEUE_INIT(&tmp_watcher_list.watchers); | ||
| 1728 | /* Note that the queue we use is shared with the start and stop() | ||
| 1729 | - * functions, making uv__queue_foreach unsafe to use. So we use the | ||
| 1730 | - * uv__queue_move trick to safely iterate. Also don't free the watcher | ||
| 1731 | + * functions, making QUEUE_FOREACH unsafe to use. So we use the | ||
| 1732 | + * QUEUE_MOVE trick to safely iterate. Also don't free the watcher | ||
| 1733 | * list until we're done iterating. c.f. uv__inotify_read. | ||
| 1734 | */ | ||
| 1735 | RB_FOREACH_SAFE(watcher_list, watcher_root, | ||
| 1736 | uv__inotify_watchers(loop), tmp_watcher_list_iter) { | ||
| 1737 | watcher_list->iterating = 1; | ||
| 1738 | - uv__queue_move(&watcher_list->watchers, &queue); | ||
| 1739 | - while (!uv__queue_empty(&queue)) { | ||
| 1740 | - q = uv__queue_head(&queue); | ||
| 1741 | - handle = uv__queue_data(q, uv_fs_event_t, watchers); | ||
| 1742 | + QUEUE_MOVE(&watcher_list->watchers, &queue); | ||
| 1743 | + while (!QUEUE_EMPTY(&queue)) { | ||
| 1744 | + q = QUEUE_HEAD(&queue); | ||
| 1745 | + handle = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||
| 1746 | /* It's critical to keep a copy of path here, because it | ||
| 1747 | * will be set to NULL by stop() and then deallocated by | ||
| 1748 | * maybe_free_watcher_list | ||
| 1749 | */ | ||
| 1750 | tmp_path = uv__strdup(handle->path); | ||
| 1751 | assert(tmp_path != NULL); | ||
| 1752 | - uv__queue_remove(q); | ||
| 1753 | - uv__queue_insert_tail(&watcher_list->watchers, q); | ||
| 1754 | + QUEUE_REMOVE(q); | ||
| 1755 | + QUEUE_INSERT_TAIL(&watcher_list->watchers, q); | ||
| 1756 | uv_fs_event_stop(handle); | ||
| 1757 | |||
| 1758 | - uv__queue_insert_tail(&tmp_watcher_list.watchers, &handle->watchers); | ||
| 1759 | + QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers); | ||
| 1760 | handle->path = tmp_path; | ||
| 1761 | } | ||
| 1762 | watcher_list->iterating = 0; | ||
| 1763 | maybe_free_watcher_list(watcher_list, loop); | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | - uv__queue_move(&tmp_watcher_list.watchers, &queue); | ||
| 1767 | - while (!uv__queue_empty(&queue)) { | ||
| 1768 | - q = uv__queue_head(&queue); | ||
| 1769 | - uv__queue_remove(q); | ||
| 1770 | - handle = uv__queue_data(q, uv_fs_event_t, watchers); | ||
| 1771 | + QUEUE_MOVE(&tmp_watcher_list.watchers, &queue); | ||
| 1772 | + while (!QUEUE_EMPTY(&queue)) { | ||
| 1773 | + q = QUEUE_HEAD(&queue); | ||
| 1774 | + QUEUE_REMOVE(q); | ||
| 1775 | + handle = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||
| 1776 | tmp_path = handle->path; | ||
| 1777 | handle->path = NULL; | ||
| 1778 | err = uv_fs_event_start(handle, handle->cb, tmp_path, 0); | ||
| 1779 | @@ -2341,7 +2168,7 @@ static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { | ||
| 1780 | |||
| 1781 | static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { | ||
| 1782 | /* if the watcher_list->watchers is being iterated over, we can't free it. */ | ||
| 1783 | - if ((!w->iterating) && uv__queue_empty(&w->watchers)) { | ||
| 1784 | + if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { | ||
| 1785 | /* No watchers left for this path. Clean up. */ | ||
| 1786 | RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w); | ||
| 1787 | inotify_rm_watch(loop->inotify_fd, w->wd); | ||
| 1788 | @@ -2356,8 +2183,8 @@ static void uv__inotify_read(uv_loop_t* loop, | ||
| 1789 | const struct inotify_event* e; | ||
| 1790 | struct watcher_list* w; | ||
| 1791 | uv_fs_event_t* h; | ||
| 1792 | - struct uv__queue queue; | ||
| 1793 | - struct uv__queue* q; | ||
| 1794 | + QUEUE queue; | ||
| 1795 | + QUEUE* q; | ||
| 1796 | const char* path; | ||
| 1797 | ssize_t size; | ||
| 1798 | const char *p; | ||
| 1799 | @@ -2400,7 +2227,7 @@ static void uv__inotify_read(uv_loop_t* loop, | ||
| 1800 | * What can go wrong? | ||
| 1801 | * A callback could call uv_fs_event_stop() | ||
| 1802 | * and the queue can change under our feet. | ||
| 1803 | - * So, we use uv__queue_move() trick to safely iterate over the queue. | ||
| 1804 | + * So, we use QUEUE_MOVE() trick to safely iterate over the queue. | ||
| 1805 | * And we don't free the watcher_list until we're done iterating. | ||
| 1806 | * | ||
| 1807 | * First, | ||
| 1808 | @@ -2408,13 +2235,13 @@ static void uv__inotify_read(uv_loop_t* loop, | ||
| 1809 | * not to free watcher_list. | ||
| 1810 | */ | ||
| 1811 | w->iterating = 1; | ||
| 1812 | - uv__queue_move(&w->watchers, &queue); | ||
| 1813 | - while (!uv__queue_empty(&queue)) { | ||
| 1814 | - q = uv__queue_head(&queue); | ||
| 1815 | - h = uv__queue_data(q, uv_fs_event_t, watchers); | ||
| 1816 | + QUEUE_MOVE(&w->watchers, &queue); | ||
| 1817 | + while (!QUEUE_EMPTY(&queue)) { | ||
| 1818 | + q = QUEUE_HEAD(&queue); | ||
| 1819 | + h = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||
| 1820 | |||
| 1821 | - uv__queue_remove(q); | ||
| 1822 | - uv__queue_insert_tail(&w->watchers, q); | ||
| 1823 | + QUEUE_REMOVE(q); | ||
| 1824 | + QUEUE_INSERT_TAIL(&w->watchers, q); | ||
| 1825 | |||
| 1826 | h->cb(h, path, events, 0); | ||
| 1827 | } | ||
| 1828 | @@ -2476,13 +2303,13 @@ int uv_fs_event_start(uv_fs_event_t* handle, | ||
| 1829 | |||
| 1830 | w->wd = wd; | ||
| 1831 | w->path = memcpy(w + 1, path, len); | ||
| 1832 | - uv__queue_init(&w->watchers); | ||
| 1833 | + QUEUE_INIT(&w->watchers); | ||
| 1834 | w->iterating = 0; | ||
| 1835 | RB_INSERT(watcher_root, uv__inotify_watchers(loop), w); | ||
| 1836 | |||
| 1837 | no_insert: | ||
| 1838 | uv__handle_start(handle); | ||
| 1839 | - uv__queue_insert_tail(&w->watchers, &handle->watchers); | ||
| 1840 | + QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); | ||
| 1841 | handle->path = w->path; | ||
| 1842 | handle->cb = cb; | ||
| 1843 | handle->wd = wd; | ||
| 1844 | @@ -2503,7 +2330,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { | ||
| 1845 | handle->wd = -1; | ||
| 1846 | handle->path = NULL; | ||
| 1847 | uv__handle_stop(handle); | ||
| 1848 | - uv__queue_remove(&handle->watchers); | ||
| 1849 | + QUEUE_REMOVE(&handle->watchers); | ||
| 1850 | |||
| 1851 | maybe_free_watcher_list(w, handle->loop); | ||
| 1852 | |||
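The removed uv__kernel_version() packed the running kernel's major, minor and patch levels one byte each, which is why the thresholds in the reverted hunks read like hex dates: 0x050F00 is 5.15.0 and 0x050F5A is 5.15.90 (5*65536 + 15*256 + 90 = 331610). A standalone sketch of the same parse and packing, assuming a glibc-style uname():

    #include <stdio.h>
    #include <sys/utsname.h>

    /* One byte per component, so version checks are plain integer
     * comparisons; patch levels above 255 would overflow into minor. */
    static unsigned kernel_version(void) {
      struct utsname u;
      unsigned major, minor, patch;

      if (uname(&u) == -1)
        return 0;
      if (sscanf(u.release, "%u.%u.%u", &major, &minor, &patch) != 3)
        return 0;
      return major * 65536 + minor * 256 + patch;
    }

    int main(void) {
      printf("running %#x, io_uring close threshold %#x\n",
             kernel_version(), 0x050F5Au);  /* 5.15.90 */
      return 0;
    }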
| 1853 | diff --git a/deps/uv/src/unix/loop-watcher.c b/deps/uv/src/unix/loop-watcher.c | ||
| 1854 | index 2db8b515df7..b8c1c2a7102 100644 | ||
| 1855 | --- a/deps/uv/src/unix/loop-watcher.c | ||
| 1856 | +++ b/deps/uv/src/unix/loop-watcher.c | ||
| 1857 | @@ -32,7 +32,7 @@ | ||
| 1858 | int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \ | ||
| 1859 | if (uv__is_active(handle)) return 0; \ | ||
| 1860 | if (cb == NULL) return UV_EINVAL; \ | ||
| 1861 | - uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue); \ | ||
| 1862 | + QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \ | ||
| 1863 | handle->name##_cb = cb; \ | ||
| 1864 | uv__handle_start(handle); \ | ||
| 1865 | return 0; \ | ||
| 1866 | @@ -40,21 +40,21 @@ | ||
| 1867 | \ | ||
| 1868 | int uv_##name##_stop(uv_##name##_t* handle) { \ | ||
| 1869 | if (!uv__is_active(handle)) return 0; \ | ||
| 1870 | - uv__queue_remove(&handle->queue); \ | ||
| 1871 | + QUEUE_REMOVE(&handle->queue); \ | ||
| 1872 | uv__handle_stop(handle); \ | ||
| 1873 | return 0; \ | ||
| 1874 | } \ | ||
| 1875 | \ | ||
| 1876 | void uv__run_##name(uv_loop_t* loop) { \ | ||
| 1877 | uv_##name##_t* h; \ | ||
| 1878 | - struct uv__queue queue; \ | ||
| 1879 | - struct uv__queue* q; \ | ||
| 1880 | - uv__queue_move(&loop->name##_handles, &queue); \ | ||
| 1881 | - while (!uv__queue_empty(&queue)) { \ | ||
| 1882 | - q = uv__queue_head(&queue); \ | ||
| 1883 | - h = uv__queue_data(q, uv_##name##_t, queue); \ | ||
| 1884 | - uv__queue_remove(q); \ | ||
| 1885 | - uv__queue_insert_tail(&loop->name##_handles, q); \ | ||
| 1886 | + QUEUE queue; \ | ||
| 1887 | + QUEUE* q; \ | ||
| 1888 | + QUEUE_MOVE(&loop->name##_handles, &queue); \ | ||
| 1889 | + while (!QUEUE_EMPTY(&queue)) { \ | ||
| 1890 | + q = QUEUE_HEAD(&queue); \ | ||
| 1891 | + h = QUEUE_DATA(q, uv_##name##_t, queue); \ | ||
| 1892 | + QUEUE_REMOVE(q); \ | ||
| 1893 | + QUEUE_INSERT_TAIL(&loop->name##_handles, q); \ | ||
| 1894 | h->name##_cb(h); \ | ||
| 1895 | } \ | ||
| 1896 | } \ | ||
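The uv__run_##name body above shows the move-then-reinsert iteration idiom this patch touches in several files: the whole handle list is first spliced onto a stack-local queue, and each handle is put back on the live list before its callback runs. That matters because the callback may call uv_##name##_stop(), which does a QUEUE_REMOVE; with the handle already back on the live list, the stop only edits that list and never the local cursor. Building on the simplified queue sketch earlier (queue_init, queue_empty, queue_remove, queue_insert_tail as defined there), QUEUE_MOVE in miniature is:

    /* Splice everything on `h` onto the empty local head `n`; `h` ends
     * up empty.  Constant time regardless of list length. */
    static void queue_move(struct queue* h, struct queue* n) {
      if (queue_empty(h)) {
        queue_init(n);
        return;
      }
      n->next = h->next;
      n->prev = h->prev;
      n->next->prev = n;
      n->prev->next = n;
      queue_init(h);
    }

    /* Usage shape of uv__run_##name above:
     *
     *   queue_move(&loop_handles, &local);
     *   while (!queue_empty(&local)) {
     *     q = local.next;
     *     queue_remove(q);
     *     queue_insert_tail(&loop_handles, q);  back on the live list
     *     run the callback; stop() now only touches loop_handles
     *   }
     */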
| 1897 | diff --git a/deps/uv/src/unix/loop.c b/deps/uv/src/unix/loop.c | ||
| 1898 | index a9468e8e19c..90a51b339de 100644 | ||
| 1899 | --- a/deps/uv/src/unix/loop.c | ||
| 1900 | +++ b/deps/uv/src/unix/loop.c | ||
| 1901 | @@ -50,20 +50,20 @@ int uv_loop_init(uv_loop_t* loop) { | ||
| 1902 | sizeof(lfields->loop_metrics.metrics)); | ||
| 1903 | |||
| 1904 | heap_init((struct heap*) &loop->timer_heap); | ||
| 1905 | - uv__queue_init(&loop->wq); | ||
| 1906 | - uv__queue_init(&loop->idle_handles); | ||
| 1907 | - uv__queue_init(&loop->async_handles); | ||
| 1908 | - uv__queue_init(&loop->check_handles); | ||
| 1909 | - uv__queue_init(&loop->prepare_handles); | ||
| 1910 | - uv__queue_init(&loop->handle_queue); | ||
| 1911 | + QUEUE_INIT(&loop->wq); | ||
| 1912 | + QUEUE_INIT(&loop->idle_handles); | ||
| 1913 | + QUEUE_INIT(&loop->async_handles); | ||
| 1914 | + QUEUE_INIT(&loop->check_handles); | ||
| 1915 | + QUEUE_INIT(&loop->prepare_handles); | ||
| 1916 | + QUEUE_INIT(&loop->handle_queue); | ||
| 1917 | |||
| 1918 | loop->active_handles = 0; | ||
| 1919 | loop->active_reqs.count = 0; | ||
| 1920 | loop->nfds = 0; | ||
| 1921 | loop->watchers = NULL; | ||
| 1922 | loop->nwatchers = 0; | ||
| 1923 | - uv__queue_init(&loop->pending_queue); | ||
| 1924 | - uv__queue_init(&loop->watcher_queue); | ||
| 1925 | + QUEUE_INIT(&loop->pending_queue); | ||
| 1926 | + QUEUE_INIT(&loop->watcher_queue); | ||
| 1927 | |||
| 1928 | loop->closing_handles = NULL; | ||
| 1929 | uv__update_time(loop); | ||
| 1930 | @@ -85,7 +85,7 @@ int uv_loop_init(uv_loop_t* loop) { | ||
| 1931 | err = uv__process_init(loop); | ||
| 1932 | if (err) | ||
| 1933 | goto fail_signal_init; | ||
| 1934 | - uv__queue_init(&loop->process_handles); | ||
| 1935 | + QUEUE_INIT(&loop->process_handles); | ||
| 1936 | |||
| 1937 | err = uv_rwlock_init(&loop->cloexec_lock); | ||
| 1938 | if (err) | ||
| 1939 | @@ -152,9 +152,9 @@ int uv_loop_fork(uv_loop_t* loop) { | ||
| 1940 | if (w == NULL) | ||
| 1941 | continue; | ||
| 1942 | |||
| 1943 | - if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) { | ||
| 1944 | + if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) { | ||
| 1945 | w->events = 0; /* Force re-registration in uv__io_poll. */ | ||
| 1946 | - uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||
| 1947 | + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||
| 1948 | } | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | @@ -180,7 +180,7 @@ void uv__loop_close(uv_loop_t* loop) { | ||
| 1952 | } | ||
| 1953 | |||
| 1954 | uv_mutex_lock(&loop->wq_mutex); | ||
| 1955 | - assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!"); | ||
| 1956 | + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); | ||
| 1957 | assert(!uv__has_active_reqs(loop)); | ||
| 1958 | uv_mutex_unlock(&loop->wq_mutex); | ||
| 1959 | uv_mutex_destroy(&loop->wq_mutex); | ||
| 1960 | @@ -192,8 +192,8 @@ void uv__loop_close(uv_loop_t* loop) { | ||
| 1961 | uv_rwlock_destroy(&loop->cloexec_lock); | ||
| 1962 | |||
| 1963 | #if 0 | ||
| 1964 | - assert(uv__queue_empty(&loop->pending_queue)); | ||
| 1965 | - assert(uv__queue_empty(&loop->watcher_queue)); | ||
| 1966 | + assert(QUEUE_EMPTY(&loop->pending_queue)); | ||
| 1967 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 1968 | assert(loop->nfds == 0); | ||
| 1969 | #endif | ||
| 1970 | |||
| 1971 | diff --git a/deps/uv/src/unix/os390-syscalls.c b/deps/uv/src/unix/os390-syscalls.c | ||
| 1972 | index 7f90c270906..5861aaaa202 100644 | ||
| 1973 | --- a/deps/uv/src/unix/os390-syscalls.c | ||
| 1974 | +++ b/deps/uv/src/unix/os390-syscalls.c | ||
| 1975 | @@ -27,7 +27,7 @@ | ||
| 1976 | #include <termios.h> | ||
| 1977 | #include <sys/msg.h> | ||
| 1978 | |||
| 1979 | -static struct uv__queue global_epoll_queue; | ||
| 1980 | +static QUEUE global_epoll_queue; | ||
| 1981 | static uv_mutex_t global_epoll_lock; | ||
| 1982 | static uv_once_t once = UV_ONCE_INIT; | ||
| 1983 | |||
| 1984 | @@ -178,18 +178,18 @@ static void after_fork(void) { | ||
| 1985 | |||
| 1986 | |||
| 1987 | static void child_fork(void) { | ||
| 1988 | - struct uv__queue* q; | ||
| 1989 | + QUEUE* q; | ||
| 1990 | uv_once_t child_once = UV_ONCE_INIT; | ||
| 1991 | |||
| 1992 | /* reset once */ | ||
| 1993 | memcpy(&once, &child_once, sizeof(child_once)); | ||
| 1994 | |||
| 1995 | /* reset epoll list */ | ||
| 1996 | - while (!uv__queue_empty(&global_epoll_queue)) { | ||
| 1997 | + while (!QUEUE_EMPTY(&global_epoll_queue)) { | ||
| 1998 | uv__os390_epoll* lst; | ||
| 1999 | - q = uv__queue_head(&global_epoll_queue); | ||
| 2000 | - uv__queue_remove(q); | ||
| 2001 | - lst = uv__queue_data(q, uv__os390_epoll, member); | ||
| 2002 | + q = QUEUE_HEAD(&global_epoll_queue); | ||
| 2003 | + QUEUE_REMOVE(q); | ||
| 2004 | + lst = QUEUE_DATA(q, uv__os390_epoll, member); | ||
| 2005 | uv__free(lst->items); | ||
| 2006 | lst->items = NULL; | ||
| 2007 | lst->size = 0; | ||
| 2008 | @@ -201,7 +201,7 @@ static void child_fork(void) { | ||
| 2009 | |||
| 2010 | |||
| 2011 | static void epoll_init(void) { | ||
| 2012 | - uv__queue_init(&global_epoll_queue); | ||
| 2013 | + QUEUE_INIT(&global_epoll_queue); | ||
| 2014 | if (uv_mutex_init(&global_epoll_lock)) | ||
| 2015 | abort(); | ||
| 2016 | |||
| 2017 | @@ -225,7 +225,7 @@ uv__os390_epoll* epoll_create1(int flags) { | ||
| 2018 | lst->items[lst->size - 1].revents = 0; | ||
| 2019 | uv_once(&once, epoll_init); | ||
| 2020 | uv_mutex_lock(&global_epoll_lock); | ||
| 2021 | - uv__queue_insert_tail(&global_epoll_queue, &lst->member); | ||
| 2022 | + QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member); | ||
| 2023 | uv_mutex_unlock(&global_epoll_lock); | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | @@ -352,14 +352,14 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events, | ||
| 2027 | |||
| 2028 | |||
| 2029 | int epoll_file_close(int fd) { | ||
| 2030 | - struct uv__queue* q; | ||
| 2031 | + QUEUE* q; | ||
| 2032 | |||
| 2033 | uv_once(&once, epoll_init); | ||
| 2034 | uv_mutex_lock(&global_epoll_lock); | ||
| 2035 | - uv__queue_foreach(q, &global_epoll_queue) { | ||
| 2036 | + QUEUE_FOREACH(q, &global_epoll_queue) { | ||
| 2037 | uv__os390_epoll* lst; | ||
| 2038 | |||
| 2039 | - lst = uv__queue_data(q, uv__os390_epoll, member); | ||
| 2040 | + lst = QUEUE_DATA(q, uv__os390_epoll, member); | ||
| 2041 | if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1) | ||
| 2042 | lst->items[fd].fd = -1; | ||
| 2043 | } | ||
| 2044 | @@ -371,7 +371,7 @@ int epoll_file_close(int fd) { | ||
| 2045 | void epoll_queue_close(uv__os390_epoll* lst) { | ||
| 2046 | /* Remove epoll instance from global queue */ | ||
| 2047 | uv_mutex_lock(&global_epoll_lock); | ||
| 2048 | - uv__queue_remove(&lst->member); | ||
| 2049 | + QUEUE_REMOVE(&lst->member); | ||
| 2050 | uv_mutex_unlock(&global_epoll_lock); | ||
| 2051 | |||
| 2052 | /* Free resources */ | ||
| 2053 | diff --git a/deps/uv/src/unix/os390-syscalls.h b/deps/uv/src/unix/os390-syscalls.h | ||
| 2054 | index d5f3bcf8b1c..9f504171d85 100644 | ||
| 2055 | --- a/deps/uv/src/unix/os390-syscalls.h | ||
| 2056 | +++ b/deps/uv/src/unix/os390-syscalls.h | ||
| 2057 | @@ -45,7 +45,7 @@ struct epoll_event { | ||
| 2058 | }; | ||
| 2059 | |||
| 2060 | typedef struct { | ||
| 2061 | - struct uv__queue member; | ||
| 2062 | + QUEUE member; | ||
| 2063 | struct pollfd* items; | ||
| 2064 | unsigned long size; | ||
| 2065 | int msg_queue; | ||
| 2066 | diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c | ||
| 2067 | index bbd37692d1d..a87c2d77faf 100644 | ||
| 2068 | --- a/deps/uv/src/unix/os390.c | ||
| 2069 | +++ b/deps/uv/src/unix/os390.c | ||
| 2070 | @@ -815,7 +815,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2071 | uv__os390_epoll* ep; | ||
| 2072 | int have_signals; | ||
| 2073 | int real_timeout; | ||
| 2074 | - struct uv__queue* q; | ||
| 2075 | + QUEUE* q; | ||
| 2076 | uv__io_t* w; | ||
| 2077 | uint64_t base; | ||
| 2078 | int count; | ||
| 2079 | @@ -827,19 +827,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2080 | int reset_timeout; | ||
| 2081 | |||
| 2082 | if (loop->nfds == 0) { | ||
| 2083 | - assert(uv__queue_empty(&loop->watcher_queue)); | ||
| 2084 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 2085 | return; | ||
| 2086 | } | ||
| 2087 | |||
| 2088 | lfields = uv__get_internal_fields(loop); | ||
| 2089 | |||
| 2090 | - while (!uv__queue_empty(&loop->watcher_queue)) { | ||
| 2091 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 2092 | uv_stream_t* stream; | ||
| 2093 | |||
| 2094 | - q = uv__queue_head(&loop->watcher_queue); | ||
| 2095 | - uv__queue_remove(q); | ||
| 2096 | - uv__queue_init(q); | ||
| 2097 | - w = uv__queue_data(q, uv__io_t, watcher_queue); | ||
| 2098 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 2099 | + QUEUE_REMOVE(q); | ||
| 2100 | + QUEUE_INIT(q); | ||
| 2101 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 2102 | |||
| 2103 | assert(w->pevents != 0); | ||
| 2104 | assert(w->fd >= 0); | ||
| 2105 | diff --git a/deps/uv/src/unix/pipe.c b/deps/uv/src/unix/pipe.c | ||
| 2106 | index d332f351830..ce91ac49b30 100644 | ||
| 2107 | --- a/deps/uv/src/unix/pipe.c | ||
| 2108 | +++ b/deps/uv/src/unix/pipe.c | ||
| 2109 | @@ -297,7 +297,7 @@ out: | ||
| 2110 | uv__req_init(handle->loop, req, UV_CONNECT); | ||
| 2111 | req->handle = (uv_stream_t*)handle; | ||
| 2112 | req->cb = cb; | ||
| 2113 | - uv__queue_init(&req->queue); | ||
| 2114 | + QUEUE_INIT(&req->queue); | ||
| 2115 | |||
| 2116 | /* Force callback to run on next tick in case of error. */ | ||
| 2117 | if (err) | ||
| 2118 | diff --git a/deps/uv/src/unix/posix-poll.c b/deps/uv/src/unix/posix-poll.c | ||
| 2119 | index 2e016c2fbae..7e7de86845d 100644 | ||
| 2120 | --- a/deps/uv/src/unix/posix-poll.c | ||
| 2121 | +++ b/deps/uv/src/unix/posix-poll.c | ||
| 2122 | @@ -137,7 +137,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2123 | sigset_t set; | ||
| 2124 | uint64_t time_base; | ||
| 2125 | uint64_t time_diff; | ||
| 2126 | - struct uv__queue* q; | ||
| 2127 | + QUEUE* q; | ||
| 2128 | uv__io_t* w; | ||
| 2129 | size_t i; | ||
| 2130 | unsigned int nevents; | ||
| 2131 | @@ -149,19 +149,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2132 | int reset_timeout; | ||
| 2133 | |||
| 2134 | if (loop->nfds == 0) { | ||
| 2135 | - assert(uv__queue_empty(&loop->watcher_queue)); | ||
| 2136 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 2137 | return; | ||
| 2138 | } | ||
| 2139 | |||
| 2140 | lfields = uv__get_internal_fields(loop); | ||
| 2141 | |||
| 2142 | /* Take queued watchers and add their fds to our poll fds array. */ | ||
| 2143 | - while (!uv__queue_empty(&loop->watcher_queue)) { | ||
| 2144 | - q = uv__queue_head(&loop->watcher_queue); | ||
| 2145 | - uv__queue_remove(q); | ||
| 2146 | - uv__queue_init(q); | ||
| 2147 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 2148 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 2149 | + QUEUE_REMOVE(q); | ||
| 2150 | + QUEUE_INIT(q); | ||
| 2151 | |||
| 2152 | - w = uv__queue_data(q, uv__io_t, watcher_queue); | ||
| 2153 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 2154 | assert(w->pevents != 0); | ||
| 2155 | assert(w->fd >= 0); | ||
| 2156 | assert(w->fd < (int) loop->nwatchers); | ||
| 2157 | diff --git a/deps/uv/src/unix/process.c b/deps/uv/src/unix/process.c | ||
| 2158 | index dd58c18d9b9..bbf367b57d3 100644 | ||
| 2159 | --- a/deps/uv/src/unix/process.c | ||
| 2160 | +++ b/deps/uv/src/unix/process.c | ||
| 2161 | @@ -108,17 +108,17 @@ void uv__wait_children(uv_loop_t* loop) { | ||
| 2162 | int status; | ||
| 2163 | int options; | ||
| 2164 | pid_t pid; | ||
| 2165 | - struct uv__queue pending; | ||
| 2166 | - struct uv__queue* q; | ||
| 2167 | - struct uv__queue* h; | ||
| 2168 | + QUEUE pending; | ||
| 2169 | + QUEUE* q; | ||
| 2170 | + QUEUE* h; | ||
| 2171 | |||
| 2172 | - uv__queue_init(&pending); | ||
| 2173 | + QUEUE_INIT(&pending); | ||
| 2174 | |||
| 2175 | h = &loop->process_handles; | ||
| 2176 | - q = uv__queue_head(h); | ||
| 2177 | + q = QUEUE_HEAD(h); | ||
| 2178 | while (q != h) { | ||
| 2179 | - process = uv__queue_data(q, uv_process_t, queue); | ||
| 2180 | - q = uv__queue_next(q); | ||
| 2181 | + process = QUEUE_DATA(q, uv_process_t, queue); | ||
| 2182 | + q = QUEUE_NEXT(q); | ||
| 2183 | |||
| 2184 | #ifndef UV_USE_SIGCHLD | ||
| 2185 | if ((process->flags & UV_HANDLE_REAP) == 0) | ||
| 2186 | @@ -149,18 +149,18 @@ void uv__wait_children(uv_loop_t* loop) { | ||
| 2187 | |||
| 2188 | assert(pid == process->pid); | ||
| 2189 | process->status = status; | ||
| 2190 | - uv__queue_remove(&process->queue); | ||
| 2191 | - uv__queue_insert_tail(&pending, &process->queue); | ||
| 2192 | + QUEUE_REMOVE(&process->queue); | ||
| 2193 | + QUEUE_INSERT_TAIL(&pending, &process->queue); | ||
| 2194 | } | ||
| 2195 | |||
| 2196 | h = &pending; | ||
| 2197 | - q = uv__queue_head(h); | ||
| 2198 | + q = QUEUE_HEAD(h); | ||
| 2199 | while (q != h) { | ||
| 2200 | - process = uv__queue_data(q, uv_process_t, queue); | ||
| 2201 | - q = uv__queue_next(q); | ||
| 2202 | + process = QUEUE_DATA(q, uv_process_t, queue); | ||
| 2203 | + q = QUEUE_NEXT(q); | ||
| 2204 | |||
| 2205 | - uv__queue_remove(&process->queue); | ||
| 2206 | - uv__queue_init(&process->queue); | ||
| 2207 | + QUEUE_REMOVE(&process->queue); | ||
| 2208 | + QUEUE_INIT(&process->queue); | ||
| 2209 | uv__handle_stop(process); | ||
| 2210 | |||
| 2211 | if (process->exit_cb == NULL) | ||
| 2212 | @@ -176,7 +176,7 @@ void uv__wait_children(uv_loop_t* loop) { | ||
| 2213 | |||
| 2214 | process->exit_cb(process, exit_status, term_signal); | ||
| 2215 | } | ||
| 2216 | - assert(uv__queue_empty(&pending)); | ||
| 2217 | + assert(QUEUE_EMPTY(&pending)); | ||
| 2218 | } | ||
| 2219 | |||
| 2220 | /* | ||
| 2221 | @@ -978,7 +978,7 @@ int uv_spawn(uv_loop_t* loop, | ||
| 2222 | UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); | ||
| 2223 | |||
| 2224 | uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS); | ||
| 2225 | - uv__queue_init(&process->queue); | ||
| 2226 | + QUEUE_INIT(&process->queue); | ||
| 2227 | process->status = 0; | ||
| 2228 | |||
| 2229 | stdio_count = options->stdio_count; | ||
| 2230 | @@ -1041,7 +1041,7 @@ int uv_spawn(uv_loop_t* loop, | ||
| 2231 | |||
| 2232 | process->pid = pid; | ||
| 2233 | process->exit_cb = options->exit_cb; | ||
| 2234 | - uv__queue_insert_tail(&loop->process_handles, &process->queue); | ||
| 2235 | + QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue); | ||
| 2236 | uv__handle_start(process); | ||
| 2237 | } | ||
| 2238 | |||
| 2239 | @@ -1103,10 +1103,10 @@ int uv_kill(int pid, int signum) { | ||
| 2240 | |||
| 2241 | |||
| 2242 | void uv__process_close(uv_process_t* handle) { | ||
| 2243 | - uv__queue_remove(&handle->queue); | ||
| 2244 | + QUEUE_REMOVE(&handle->queue); | ||
| 2245 | uv__handle_stop(handle); | ||
| 2246 | #ifdef UV_USE_SIGCHLD | ||
| 2247 | - if (uv__queue_empty(&handle->loop->process_handles)) | ||
| 2248 | + if (QUEUE_EMPTY(&handle->loop->process_handles)) | ||
| 2249 | uv_signal_stop(&handle->loop->child_watcher); | ||
| 2250 | #endif | ||
| 2251 | } | ||
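uv__wait_children() above reaps in two passes: the first loop only detaches exited processes from loop->process_handles onto a local pending queue, and only the second loop runs exit callbacks. An exit_cb is then free to spawn or close other process handles, because doing so mutates the live list and never the private list being walked, and the closing assert(QUEUE_EMPTY(&pending)) holds since pass two pops every entry. The same shape in miniature, again in terms of the simplified queue sketch earlier (finished() and run_exit_cb() are hypothetical stand-ins):

    struct queue pending;
    struct queue* q;

    queue_init(&pending);

    /* Pass 1: detach finished entries; no user code runs here. */
    for (q = handles.next; q != &handles; ) {
      struct queue* next = q->next;  /* save before a possible remove */
      if (finished(q)) {
        queue_remove(q);
        queue_insert_tail(&pending, q);
      }
      q = next;
    }

    /* Pass 2: callbacks see a stable, private list. */
    while (!queue_empty(&pending)) {
      q = pending.next;
      queue_remove(q);
      run_exit_cb(q);  /* may add/remove on `handles` freely */
    }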
| 2252 | diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c | ||
| 2253 | index 63aba5a60e0..bb70523f561 100644 | ||
| 2254 | --- a/deps/uv/src/unix/signal.c | ||
| 2255 | +++ b/deps/uv/src/unix/signal.c | ||
| 2256 | @@ -291,16 +291,16 @@ int uv__signal_loop_fork(uv_loop_t* loop) { | ||
| 2257 | |||
| 2258 | |||
| 2259 | void uv__signal_loop_cleanup(uv_loop_t* loop) { | ||
| 2260 | - struct uv__queue* q; | ||
| 2261 | + QUEUE* q; | ||
| 2262 | |||
| 2263 | /* Stop all the signal watchers that are still attached to this loop. This | ||
| 2264 | * ensures that the (shared) signal tree doesn't contain any invalid entries | ||
| 2265 | * entries, and that signal handlers are removed when appropriate. | ||
| 2266 | - * It's safe to use uv__queue_foreach here because the handles and the handle | ||
| 2267 | + * It's safe to use QUEUE_FOREACH here because the handles and the handle | ||
| 2268 | * queue are not modified by uv__signal_stop(). | ||
| 2269 | */ | ||
| 2270 | - uv__queue_foreach(q, &loop->handle_queue) { | ||
| 2271 | - uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue); | ||
| 2272 | + QUEUE_FOREACH(q, &loop->handle_queue) { | ||
| 2273 | + uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||
| 2274 | |||
| 2275 | if (handle->type == UV_SIGNAL) | ||
| 2276 | uv__signal_stop((uv_signal_t*) handle); | ||
| 2277 | diff --git a/deps/uv/src/unix/stream.c b/deps/uv/src/unix/stream.c | ||
| 2278 | index 28c4d5463c4..03f92b5045a 100644 | ||
| 2279 | --- a/deps/uv/src/unix/stream.c | ||
| 2280 | +++ b/deps/uv/src/unix/stream.c | ||
| 2281 | @@ -94,8 +94,8 @@ void uv__stream_init(uv_loop_t* loop, | ||
| 2282 | stream->accepted_fd = -1; | ||
| 2283 | stream->queued_fds = NULL; | ||
| 2284 | stream->delayed_error = 0; | ||
| 2285 | - uv__queue_init(&stream->write_queue); | ||
| 2286 | - uv__queue_init(&stream->write_completed_queue); | ||
| 2287 | + QUEUE_INIT(&stream->write_queue); | ||
| 2288 | + QUEUE_INIT(&stream->write_completed_queue); | ||
| 2289 | stream->write_queue_size = 0; | ||
| 2290 | |||
| 2291 | if (loop->emfile_fd == -1) { | ||
| 2292 | @@ -439,15 +439,15 @@ int uv__stream_open(uv_stream_t* stream, int fd, int flags) { | ||
| 2293 | |||
| 2294 | void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { | ||
| 2295 | uv_write_t* req; | ||
| 2296 | - struct uv__queue* q; | ||
| 2297 | - while (!uv__queue_empty(&stream->write_queue)) { | ||
| 2298 | - q = uv__queue_head(&stream->write_queue); | ||
| 2299 | - uv__queue_remove(q); | ||
| 2300 | + QUEUE* q; | ||
| 2301 | + while (!QUEUE_EMPTY(&stream->write_queue)) { | ||
| 2302 | + q = QUEUE_HEAD(&stream->write_queue); | ||
| 2303 | + QUEUE_REMOVE(q); | ||
| 2304 | |||
| 2305 | - req = uv__queue_data(q, uv_write_t, queue); | ||
| 2306 | + req = QUEUE_DATA(q, uv_write_t, queue); | ||
| 2307 | req->error = error; | ||
| 2308 | |||
| 2309 | - uv__queue_insert_tail(&stream->write_completed_queue, &req->queue); | ||
| 2310 | + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); | ||
| 2311 | } | ||
| 2312 | } | ||
| 2313 | |||
| 2314 | @@ -627,7 +627,7 @@ static void uv__drain(uv_stream_t* stream) { | ||
| 2315 | uv_shutdown_t* req; | ||
| 2316 | int err; | ||
| 2317 | |||
| 2318 | - assert(uv__queue_empty(&stream->write_queue)); | ||
| 2319 | + assert(QUEUE_EMPTY(&stream->write_queue)); | ||
| 2320 | if (!(stream->flags & UV_HANDLE_CLOSING)) { | ||
| 2321 | uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); | ||
| 2322 | uv__stream_osx_interrupt_select(stream); | ||
| 2323 | @@ -714,7 +714,7 @@ static void uv__write_req_finish(uv_write_t* req) { | ||
| 2324 | uv_stream_t* stream = req->handle; | ||
| 2325 | |||
| 2326 | /* Pop the req off tcp->write_queue. */ | ||
| 2327 | - uv__queue_remove(&req->queue); | ||
| 2328 | + QUEUE_REMOVE(&req->queue); | ||
| 2329 | |||
| 2330 | /* Only free when there was no error. On error, we touch up write_queue_size | ||
| 2331 | * right before making the callback. The reason we don't do that right away | ||
| 2332 | @@ -731,7 +731,7 @@ static void uv__write_req_finish(uv_write_t* req) { | ||
| 2333 | /* Add it to the write_completed_queue where it will have its | ||
| 2334 | * callback called in the near future. | ||
| 2335 | */ | ||
| 2336 | - uv__queue_insert_tail(&stream->write_completed_queue, &req->queue); | ||
| 2337 | + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); | ||
| 2338 | uv__io_feed(stream->loop, &stream->io_watcher); | ||
| 2339 | } | ||
| 2340 | |||
| 2341 | @@ -837,7 +837,7 @@ static int uv__try_write(uv_stream_t* stream, | ||
| 2342 | } | ||
| 2343 | |||
| 2344 | static void uv__write(uv_stream_t* stream) { | ||
| 2345 | - struct uv__queue* q; | ||
| 2346 | + QUEUE* q; | ||
| 2347 | uv_write_t* req; | ||
| 2348 | ssize_t n; | ||
| 2349 | int count; | ||
| 2350 | @@ -851,11 +851,11 @@ static void uv__write(uv_stream_t* stream) { | ||
| 2351 | count = 32; | ||
| 2352 | |||
| 2353 | for (;;) { | ||
| 2354 | - if (uv__queue_empty(&stream->write_queue)) | ||
| 2355 | + if (QUEUE_EMPTY(&stream->write_queue)) | ||
| 2356 | return; | ||
| 2357 | |||
| 2358 | - q = uv__queue_head(&stream->write_queue); | ||
| 2359 | - req = uv__queue_data(q, uv_write_t, queue); | ||
| 2360 | + q = QUEUE_HEAD(&stream->write_queue); | ||
| 2361 | + req = QUEUE_DATA(q, uv_write_t, queue); | ||
| 2362 | assert(req->handle == stream); | ||
| 2363 | |||
| 2364 | n = uv__try_write(stream, | ||
| 2365 | @@ -899,19 +899,19 @@ error: | ||
| 2366 | |||
| 2367 | static void uv__write_callbacks(uv_stream_t* stream) { | ||
| 2368 | uv_write_t* req; | ||
| 2369 | - struct uv__queue* q; | ||
| 2370 | - struct uv__queue pq; | ||
| 2371 | + QUEUE* q; | ||
| 2372 | + QUEUE pq; | ||
| 2373 | |||
| 2374 | - if (uv__queue_empty(&stream->write_completed_queue)) | ||
| 2375 | + if (QUEUE_EMPTY(&stream->write_completed_queue)) | ||
| 2376 | return; | ||
| 2377 | |||
| 2378 | - uv__queue_move(&stream->write_completed_queue, &pq); | ||
| 2379 | + QUEUE_MOVE(&stream->write_completed_queue, &pq); | ||
| 2380 | |||
| 2381 | - while (!uv__queue_empty(&pq)) { | ||
| 2382 | + while (!QUEUE_EMPTY(&pq)) { | ||
| 2383 | /* Pop a req off write_completed_queue. */ | ||
| 2384 | - q = uv__queue_head(&pq); | ||
| 2385 | - req = uv__queue_data(q, uv_write_t, queue); | ||
| 2386 | - uv__queue_remove(q); | ||
| 2387 | + q = QUEUE_HEAD(&pq); | ||
| 2388 | + req = QUEUE_DATA(q, uv_write_t, queue); | ||
| 2389 | + QUEUE_REMOVE(q); | ||
| 2390 | uv__req_unregister(stream->loop, req); | ||
| 2391 | |||
| 2392 | if (req->bufs != NULL) { | ||
| 2393 | @@ -1174,7 +1174,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { | ||
| 2394 | stream->shutdown_req = req; | ||
| 2395 | stream->flags &= ~UV_HANDLE_WRITABLE; | ||
| 2396 | |||
| 2397 | - if (uv__queue_empty(&stream->write_queue)) | ||
| 2398 | + if (QUEUE_EMPTY(&stream->write_queue)) | ||
| 2399 | uv__io_feed(stream->loop, &stream->io_watcher); | ||
| 2400 | |||
| 2401 | return 0; | ||
| 2402 | @@ -1227,7 +1227,7 @@ static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||
| 2403 | uv__write_callbacks(stream); | ||
| 2404 | |||
| 2405 | /* Write queue drained. */ | ||
| 2406 | - if (uv__queue_empty(&stream->write_queue)) | ||
| 2407 | + if (QUEUE_EMPTY(&stream->write_queue)) | ||
| 2408 | uv__drain(stream); | ||
| 2409 | } | ||
| 2410 | } | ||
| 2411 | @@ -1270,7 +1270,7 @@ static void uv__stream_connect(uv_stream_t* stream) { | ||
| 2412 | stream->connect_req = NULL; | ||
| 2413 | uv__req_unregister(stream->loop, req); | ||
| 2414 | |||
| 2415 | - if (error < 0 || uv__queue_empty(&stream->write_queue)) { | ||
| 2416 | + if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) { | ||
| 2417 | uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); | ||
| 2418 | } | ||
| 2419 | |||
| 2420 | @@ -1352,7 +1352,7 @@ int uv_write2(uv_write_t* req, | ||
| 2421 | req->handle = stream; | ||
| 2422 | req->error = 0; | ||
| 2423 | req->send_handle = send_handle; | ||
| 2424 | - uv__queue_init(&req->queue); | ||
| 2425 | + QUEUE_INIT(&req->queue); | ||
| 2426 | |||
| 2427 | req->bufs = req->bufsml; | ||
| 2428 | if (nbufs > ARRAY_SIZE(req->bufsml)) | ||
| 2429 | @@ -1367,7 +1367,7 @@ int uv_write2(uv_write_t* req, | ||
| 2430 | stream->write_queue_size += uv__count_bufs(bufs, nbufs); | ||
| 2431 | |||
| 2432 | /* Append the request to write_queue. */ | ||
| 2433 | - uv__queue_insert_tail(&stream->write_queue, &req->queue); | ||
| 2434 | + QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue); | ||
| 2435 | |||
| 2436 | /* If the queue was empty when this function began, we should attempt to | ||
| 2437 | * do the write immediately. Otherwise start the write_watcher and wait | ||
| 2438 | diff --git a/deps/uv/src/unix/sunos.c b/deps/uv/src/unix/sunos.c | ||
| 2439 | index 2d6bae79604..75b6fbad493 100644 | ||
| 2440 | --- a/deps/uv/src/unix/sunos.c | ||
| 2441 | +++ b/deps/uv/src/unix/sunos.c | ||
| 2442 | @@ -148,7 +148,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2443 | struct port_event events[1024]; | ||
| 2444 | struct port_event* pe; | ||
| 2445 | struct timespec spec; | ||
| 2446 | - struct uv__queue* q; | ||
| 2447 | + QUEUE* q; | ||
| 2448 | uv__io_t* w; | ||
| 2449 | sigset_t* pset; | ||
| 2450 | sigset_t set; | ||
| 2451 | @@ -166,16 +166,16 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2452 | int reset_timeout; | ||
| 2453 | |||
| 2454 | if (loop->nfds == 0) { | ||
| 2455 | - assert(uv__queue_empty(&loop->watcher_queue)); | ||
| 2456 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 2457 | return; | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | - while (!uv__queue_empty(&loop->watcher_queue)) { | ||
| 2461 | - q = uv__queue_head(&loop->watcher_queue); | ||
| 2462 | - uv__queue_remove(q); | ||
| 2463 | - uv__queue_init(q); | ||
| 2464 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 2465 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 2466 | + QUEUE_REMOVE(q); | ||
| 2467 | + QUEUE_INIT(q); | ||
| 2468 | |||
| 2469 | - w = uv__queue_data(q, uv__io_t, watcher_queue); | ||
| 2470 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 2471 | assert(w->pevents != 0); | ||
| 2472 | |||
| 2473 | if (port_associate(loop->backend_fd, | ||
| 2474 | @@ -316,8 +316,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 2475 | continue; /* Disabled by callback. */ | ||
| 2476 | |||
| 2477 | /* Events Ports operates in oneshot mode, rearm timer on next run. */ | ||
| 2478 | - if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) | ||
| 2479 | - uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||
| 2480 | + if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) | ||
| 2481 | + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||
| 2482 | } | ||
| 2483 | |||
| 2484 | uv__metrics_inc_events(loop, nevents); | ||
| 2485 | diff --git a/deps/uv/src/unix/tcp.c b/deps/uv/src/unix/tcp.c | ||
| 2486 | index d6c848f4610..ab4e06c2f67 100644 | ||
| 2487 | --- a/deps/uv/src/unix/tcp.c | ||
| 2488 | +++ b/deps/uv/src/unix/tcp.c | ||
| 2489 | @@ -124,7 +124,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { | ||
| 2490 | if (domain != AF_UNSPEC) { | ||
| 2491 | err = new_socket(tcp, domain, 0); | ||
| 2492 | if (err) { | ||
| 2493 | - uv__queue_remove(&tcp->handle_queue); | ||
| 2494 | + QUEUE_REMOVE(&tcp->handle_queue); | ||
| 2495 | if (tcp->io_watcher.fd != -1) | ||
| 2496 | uv__close(tcp->io_watcher.fd); | ||
| 2497 | tcp->io_watcher.fd = -1; | ||
| 2498 | @@ -252,7 +252,7 @@ out: | ||
| 2499 | uv__req_init(handle->loop, req, UV_CONNECT); | ||
| 2500 | req->cb = cb; | ||
| 2501 | req->handle = (uv_stream_t*) handle; | ||
| 2502 | - uv__queue_init(&req->queue); | ||
| 2503 | + QUEUE_INIT(&req->queue); | ||
| 2504 | handle->connect_req = req; | ||
| 2505 | |||
| 2506 | uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||
| 2507 | diff --git a/deps/uv/src/unix/tty.c b/deps/uv/src/unix/tty.c | ||
| 2508 | index d099bdb3b67..7a5390c1a8b 100644 | ||
| 2509 | --- a/deps/uv/src/unix/tty.c | ||
| 2510 | +++ b/deps/uv/src/unix/tty.c | ||
| 2511 | @@ -222,7 +222,7 @@ skip: | ||
| 2512 | int rc = r; | ||
| 2513 | if (newfd != -1) | ||
| 2514 | uv__close(newfd); | ||
| 2515 | - uv__queue_remove(&tty->handle_queue); | ||
| 2516 | + QUEUE_REMOVE(&tty->handle_queue); | ||
| 2517 | do | ||
| 2518 | r = fcntl(fd, F_SETFL, saved_flags); | ||
| 2519 | while (r == -1 && errno == EINTR); | ||
| 2520 | diff --git a/deps/uv/src/unix/udp.c b/deps/uv/src/unix/udp.c | ||
| 2521 | index c2814512a5f..f556808fbae 100644 | ||
| 2522 | --- a/deps/uv/src/unix/udp.c | ||
| 2523 | +++ b/deps/uv/src/unix/udp.c | ||
| 2524 | @@ -62,18 +62,18 @@ void uv__udp_close(uv_udp_t* handle) { | ||
| 2525 | |||
| 2526 | void uv__udp_finish_close(uv_udp_t* handle) { | ||
| 2527 | uv_udp_send_t* req; | ||
| 2528 | - struct uv__queue* q; | ||
| 2529 | + QUEUE* q; | ||
| 2530 | |||
| 2531 | assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); | ||
| 2532 | assert(handle->io_watcher.fd == -1); | ||
| 2533 | |||
| 2534 | - while (!uv__queue_empty(&handle->write_queue)) { | ||
| 2535 | - q = uv__queue_head(&handle->write_queue); | ||
| 2536 | - uv__queue_remove(q); | ||
| 2537 | + while (!QUEUE_EMPTY(&handle->write_queue)) { | ||
| 2538 | + q = QUEUE_HEAD(&handle->write_queue); | ||
| 2539 | + QUEUE_REMOVE(q); | ||
| 2540 | |||
| 2541 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2542 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2543 | req->status = UV_ECANCELED; | ||
| 2544 | - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||
| 2545 | + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||
| 2546 | } | ||
| 2547 | |||
| 2548 | uv__udp_run_completed(handle); | ||
| 2549 | @@ -90,16 +90,16 @@ void uv__udp_finish_close(uv_udp_t* handle) { | ||
| 2550 | |||
| 2551 | static void uv__udp_run_completed(uv_udp_t* handle) { | ||
| 2552 | uv_udp_send_t* req; | ||
| 2553 | - struct uv__queue* q; | ||
| 2554 | + QUEUE* q; | ||
| 2555 | |||
| 2556 | assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING)); | ||
| 2557 | handle->flags |= UV_HANDLE_UDP_PROCESSING; | ||
| 2558 | |||
| 2559 | - while (!uv__queue_empty(&handle->write_completed_queue)) { | ||
| 2560 | - q = uv__queue_head(&handle->write_completed_queue); | ||
| 2561 | - uv__queue_remove(q); | ||
| 2562 | + while (!QUEUE_EMPTY(&handle->write_completed_queue)) { | ||
| 2563 | + q = QUEUE_HEAD(&handle->write_completed_queue); | ||
| 2564 | + QUEUE_REMOVE(q); | ||
| 2565 | |||
| 2566 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2567 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2568 | uv__req_unregister(handle->loop, req); | ||
| 2569 | |||
| 2570 | handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); | ||
| 2571 | @@ -121,7 +121,7 @@ static void uv__udp_run_completed(uv_udp_t* handle) { | ||
| 2572 | req->send_cb(req, req->status); | ||
| 2573 | } | ||
| 2574 | |||
| 2575 | - if (uv__queue_empty(&handle->write_queue)) { | ||
| 2576 | + if (QUEUE_EMPTY(&handle->write_queue)) { | ||
| 2577 | /* Pending queue and completion queue empty, stop watcher. */ | ||
| 2578 | uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT); | ||
| 2579 | if (!uv__io_active(&handle->io_watcher, POLLIN)) | ||
| 2580 | @@ -280,20 +280,20 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { | ||
| 2581 | uv_udp_send_t* req; | ||
| 2582 | struct mmsghdr h[20]; | ||
| 2583 | struct mmsghdr* p; | ||
| 2584 | - struct uv__queue* q; | ||
| 2585 | + QUEUE* q; | ||
| 2586 | ssize_t npkts; | ||
| 2587 | size_t pkts; | ||
| 2588 | size_t i; | ||
| 2589 | |||
| 2590 | - if (uv__queue_empty(&handle->write_queue)) | ||
| 2591 | + if (QUEUE_EMPTY(&handle->write_queue)) | ||
| 2592 | return; | ||
| 2593 | |||
| 2594 | write_queue_drain: | ||
| 2595 | - for (pkts = 0, q = uv__queue_head(&handle->write_queue); | ||
| 2596 | + for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue); | ||
| 2597 | pkts < ARRAY_SIZE(h) && q != &handle->write_queue; | ||
| 2598 | - ++pkts, q = uv__queue_head(q)) { | ||
| 2599 | + ++pkts, q = QUEUE_HEAD(q)) { | ||
| 2600 | assert(q != NULL); | ||
| 2601 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2602 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2603 | assert(req != NULL); | ||
| 2604 | |||
| 2605 | p = &h[pkts]; | ||
| 2606 | @@ -325,16 +325,16 @@ write_queue_drain: | ||
| 2607 | if (npkts < 1) { | ||
| 2608 | if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) | ||
| 2609 | return; | ||
| 2610 | - for (i = 0, q = uv__queue_head(&handle->write_queue); | ||
| 2611 | + for (i = 0, q = QUEUE_HEAD(&handle->write_queue); | ||
| 2612 | i < pkts && q != &handle->write_queue; | ||
| 2613 | - ++i, q = uv__queue_head(&handle->write_queue)) { | ||
| 2614 | + ++i, q = QUEUE_HEAD(&handle->write_queue)) { | ||
| 2615 | assert(q != NULL); | ||
| 2616 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2617 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2618 | assert(req != NULL); | ||
| 2619 | |||
| 2620 | req->status = UV__ERR(errno); | ||
| 2621 | - uv__queue_remove(&req->queue); | ||
| 2622 | - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||
| 2623 | + QUEUE_REMOVE(&req->queue); | ||
| 2624 | + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||
| 2625 | } | ||
| 2626 | uv__io_feed(handle->loop, &handle->io_watcher); | ||
| 2627 | return; | ||
| 2628 | @@ -343,11 +343,11 @@ write_queue_drain: | ||
| 2629 | /* Safety: npkts known to be >0 below. Hence cast from ssize_t | ||
| 2630 | * to size_t safe. | ||
| 2631 | */ | ||
| 2632 | - for (i = 0, q = uv__queue_head(&handle->write_queue); | ||
| 2633 | + for (i = 0, q = QUEUE_HEAD(&handle->write_queue); | ||
| 2634 | i < (size_t)npkts && q != &handle->write_queue; | ||
| 2635 | - ++i, q = uv__queue_head(&handle->write_queue)) { | ||
| 2636 | + ++i, q = QUEUE_HEAD(&handle->write_queue)) { | ||
| 2637 | assert(q != NULL); | ||
| 2638 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2639 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2640 | assert(req != NULL); | ||
| 2641 | |||
| 2642 | req->status = req->bufs[0].len; | ||
| 2643 | @@ -357,25 +357,25 @@ write_queue_drain: | ||
| 2644 | * why we don't handle partial writes. Just pop the request | ||
| 2645 | * off the write queue and onto the completed queue, done. | ||
| 2646 | */ | ||
| 2647 | - uv__queue_remove(&req->queue); | ||
| 2648 | - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||
| 2649 | + QUEUE_REMOVE(&req->queue); | ||
| 2650 | + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||
| 2651 | } | ||
| 2652 | |||
| 2653 | /* couldn't batch everything, continue sending (jump to avoid stack growth) */ | ||
| 2654 | - if (!uv__queue_empty(&handle->write_queue)) | ||
| 2655 | + if (!QUEUE_EMPTY(&handle->write_queue)) | ||
| 2656 | goto write_queue_drain; | ||
| 2657 | uv__io_feed(handle->loop, &handle->io_watcher); | ||
| 2658 | #else /* __linux__ || ____FreeBSD__ */ | ||
| 2659 | uv_udp_send_t* req; | ||
| 2660 | struct msghdr h; | ||
| 2661 | - struct uv__queue* q; | ||
| 2662 | + QUEUE* q; | ||
| 2663 | ssize_t size; | ||
| 2664 | |||
| 2665 | - while (!uv__queue_empty(&handle->write_queue)) { | ||
| 2666 | - q = uv__queue_head(&handle->write_queue); | ||
| 2667 | + while (!QUEUE_EMPTY(&handle->write_queue)) { | ||
| 2668 | + q = QUEUE_HEAD(&handle->write_queue); | ||
| 2669 | assert(q != NULL); | ||
| 2670 | |||
| 2671 | - req = uv__queue_data(q, uv_udp_send_t, queue); | ||
| 2672 | + req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||
| 2673 | assert(req != NULL); | ||
| 2674 | |||
| 2675 | memset(&h, 0, sizeof h); | ||
| 2676 | @@ -414,8 +414,8 @@ write_queue_drain: | ||
| 2677 | * why we don't handle partial writes. Just pop the request | ||
| 2678 | * off the write queue and onto the completed queue, done. | ||
| 2679 | */ | ||
| 2680 | - uv__queue_remove(&req->queue); | ||
| 2681 | - uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||
| 2682 | + QUEUE_REMOVE(&req->queue); | ||
| 2683 | + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||
| 2684 | uv__io_feed(handle->loop, &handle->io_watcher); | ||
| 2685 | } | ||
| 2686 | #endif /* __linux__ || ____FreeBSD__ */ | ||
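For context, the Linux/FreeBSD branch of uv__udp_sendmsg shown above packs up to ARRAY_SIZE(h) queued sends into a single sendmmsg(2) syscall. A condensed, self-contained sketch of that pattern — illustrative helper name and simplified error handling, not the libuv wrapper itself:

#define _GNU_SOURCE            /* sendmmsg() is a GNU extension on Linux */
#include <errno.h>
#include <sys/socket.h>

/* Send up to `n` queued messages with one syscall; returns the number of
 * packets actually sent, or -1 with errno set (EAGAIN => try again later). */
static int send_batch(int fd, struct mmsghdr* msgs, unsigned int n) {
  int npkts;
  do
    npkts = sendmmsg(fd, msgs, n, 0);
  while (npkts == -1 && errno == EINTR);
  return npkts;
}

Partial batches are expected: as the hunks above show, the function simply jumps back to write_queue_drain until the write queue is empty rather than tracking partial sends.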
| 2687 | @@ -729,7 +729,7 @@ int uv__udp_send(uv_udp_send_t* req, | ||
| 2688 | memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); | ||
| 2689 | handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs); | ||
| 2690 | handle->send_queue_count++; | ||
| 2691 | - uv__queue_insert_tail(&handle->write_queue, &req->queue); | ||
| 2692 | + QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue); | ||
| 2693 | uv__handle_start(handle); | ||
| 2694 | |||
| 2695 | if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) { | ||
| 2696 | @@ -739,7 +739,7 @@ int uv__udp_send(uv_udp_send_t* req, | ||
| 2697 | * away. In such cases the `io_watcher` has to be queued for asynchronous | ||
| 2698 | * write. | ||
| 2699 | */ | ||
| 2700 | - if (!uv__queue_empty(&handle->write_queue)) | ||
| 2701 | + if (!QUEUE_EMPTY(&handle->write_queue)) | ||
| 2702 | uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||
| 2703 | } else { | ||
| 2704 | uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||
| 2705 | @@ -1007,8 +1007,8 @@ int uv__udp_init_ex(uv_loop_t* loop, | ||
| 2706 | handle->send_queue_size = 0; | ||
| 2707 | handle->send_queue_count = 0; | ||
| 2708 | uv__io_init(&handle->io_watcher, uv__udp_io, fd); | ||
| 2709 | - uv__queue_init(&handle->write_queue); | ||
| 2710 | - uv__queue_init(&handle->write_completed_queue); | ||
| 2711 | + QUEUE_INIT(&handle->write_queue); | ||
| 2712 | + QUEUE_INIT(&handle->write_completed_queue); | ||
| 2713 | |||
| 2714 | return 0; | ||
| 2715 | } | ||
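Most of these hunks are a mechanical rename from the struct uv__queue API back to the older QUEUE macros. For reference, a condensed sketch of the intrusive doubly linked list behind those macros — close to, but not verbatim, the deps/uv/src/queue.h this patch restores:

#include <stddef.h>  /* offsetof */

typedef void *QUEUE[2];  /* [0] = next, [1] = prev */

#define QUEUE_NEXT(q)   (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q)   (*(QUEUE **) &((*(q))[1]))
#define QUEUE_HEAD(q)   (QUEUE_NEXT(q))
#define QUEUE_EMPTY(q)  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))

/* Recover the struct that embeds the QUEUE field (intrusive list). */
#define QUEUE_DATA(ptr, type, field) \
  ((type *) ((char *) (ptr) - offsetof(type, field)))

#define QUEUE_INIT(q)        \
  do {                       \
    QUEUE_NEXT(q) = (q);     \
    QUEUE_PREV(q) = (q);     \
  } while (0)

#define QUEUE_INSERT_TAIL(h, q)          \
  do {                                   \
    QUEUE_NEXT(q) = (h);                 \
    QUEUE_PREV(q) = QUEUE_PREV(h);       \
    QUEUE_NEXT(QUEUE_PREV(q)) = (q);     \
    QUEUE_PREV(h) = (q);                 \
  } while (0)

#define QUEUE_REMOVE(q)                        \
  do {                                         \
    QUEUE_NEXT(QUEUE_PREV(q)) = QUEUE_NEXT(q); \
    QUEUE_PREV(QUEUE_NEXT(q)) = QUEUE_PREV(q); \
  } while (0)

/* Usage: embed a QUEUE member, then recover the owner with QUEUE_DATA,
 * e.g. struct item { int value; QUEUE queue; };
 *      struct item* it = QUEUE_DATA(q, struct item, queue);               */

struct uv__queue in libuv >= 1.46 is the same two-pointer list with the macros turned into inline functions, which is why this part of the revert is almost entirely one-to-one renames.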
| 2716 | diff --git a/deps/uv/src/uv-common.c b/deps/uv/src/uv-common.c | ||
| 2717 | index 916f3f4e006..cec771fab21 100644 | ||
| 2718 | --- a/deps/uv/src/uv-common.c | ||
| 2719 | +++ b/deps/uv/src/uv-common.c | ||
| 2720 | @@ -533,17 +533,17 @@ int uv_udp_recv_stop(uv_udp_t* handle) { | ||
| 2721 | |||
| 2722 | |||
| 2723 | void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { | ||
| 2724 | - struct uv__queue queue; | ||
| 2725 | - struct uv__queue* q; | ||
| 2726 | + QUEUE queue; | ||
| 2727 | + QUEUE* q; | ||
| 2728 | uv_handle_t* h; | ||
| 2729 | |||
| 2730 | - uv__queue_move(&loop->handle_queue, &queue); | ||
| 2731 | - while (!uv__queue_empty(&queue)) { | ||
| 2732 | - q = uv__queue_head(&queue); | ||
| 2733 | - h = uv__queue_data(q, uv_handle_t, handle_queue); | ||
| 2734 | + QUEUE_MOVE(&loop->handle_queue, &queue); | ||
| 2735 | + while (!QUEUE_EMPTY(&queue)) { | ||
| 2736 | + q = QUEUE_HEAD(&queue); | ||
| 2737 | + h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||
| 2738 | |||
| 2739 | - uv__queue_remove(q); | ||
| 2740 | - uv__queue_insert_tail(&loop->handle_queue, q); | ||
| 2741 | + QUEUE_REMOVE(q); | ||
| 2742 | + QUEUE_INSERT_TAIL(&loop->handle_queue, q); | ||
| 2743 | |||
| 2744 | if (h->flags & UV_HANDLE_INTERNAL) continue; | ||
| 2745 | walk_cb(h, arg); | ||
| 2746 | @@ -553,14 +553,14 @@ void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { | ||
| 2747 | |||
| 2748 | static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) { | ||
| 2749 | const char* type; | ||
| 2750 | - struct uv__queue* q; | ||
| 2751 | + QUEUE* q; | ||
| 2752 | uv_handle_t* h; | ||
| 2753 | |||
| 2754 | if (loop == NULL) | ||
| 2755 | loop = uv_default_loop(); | ||
| 2756 | |||
| 2757 | - uv__queue_foreach(q, &loop->handle_queue) { | ||
| 2758 | - h = uv__queue_data(q, uv_handle_t, handle_queue); | ||
| 2759 | + QUEUE_FOREACH(q, &loop->handle_queue) { | ||
| 2760 | + h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||
| 2761 | |||
| 2762 | if (only_active && !uv__is_active(h)) | ||
| 2763 | continue; | ||
| 2764 | @@ -846,7 +846,7 @@ uv_loop_t* uv_loop_new(void) { | ||
| 2765 | |||
| 2766 | |||
| 2767 | int uv_loop_close(uv_loop_t* loop) { | ||
| 2768 | - struct uv__queue* q; | ||
| 2769 | + QUEUE* q; | ||
| 2770 | uv_handle_t* h; | ||
| 2771 | #ifndef NDEBUG | ||
| 2772 | void* saved_data; | ||
| 2773 | @@ -855,8 +855,8 @@ int uv_loop_close(uv_loop_t* loop) { | ||
| 2774 | if (uv__has_active_reqs(loop)) | ||
| 2775 | return UV_EBUSY; | ||
| 2776 | |||
| 2777 | - uv__queue_foreach(q, &loop->handle_queue) { | ||
| 2778 | - h = uv__queue_data(q, uv_handle_t, handle_queue); | ||
| 2779 | + QUEUE_FOREACH(q, &loop->handle_queue) { | ||
| 2780 | + h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||
| 2781 | if (!(h->flags & UV_HANDLE_INTERNAL)) | ||
| 2782 | return UV_EBUSY; | ||
| 2783 | } | ||
| 2784 | diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h | ||
| 2785 | index cd57e5a3515..decde5362c8 100644 | ||
| 2786 | --- a/deps/uv/src/uv-common.h | ||
| 2787 | +++ b/deps/uv/src/uv-common.h | ||
| 2788 | @@ -323,7 +323,7 @@ void uv__threadpool_cleanup(void); | ||
| 2789 | (h)->loop = (loop_); \ | ||
| 2790 | (h)->type = (type_); \ | ||
| 2791 | (h)->flags = UV_HANDLE_REF; /* Ref the loop when active. */ \ | ||
| 2792 | - uv__queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue); \ | ||
| 2793 | + QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \ | ||
| 2794 | uv__handle_platform_init(h); \ | ||
| 2795 | } \ | ||
| 2796 | while (0) | ||
| 2797 | @@ -415,7 +415,6 @@ struct uv__iou { | ||
| 2798 | size_t sqelen; | ||
| 2799 | int ringfd; | ||
| 2800 | uint32_t in_flight; | ||
| 2801 | - uint32_t flags; | ||
| 2802 | }; | ||
| 2803 | #endif /* __linux__ */ | ||
| 2804 | |||
| 2805 | diff --git a/deps/uv/src/win/core.c b/deps/uv/src/win/core.c | ||
| 2806 | index e9885a0f1ff..9a3be58849a 100644 | ||
| 2807 | --- a/deps/uv/src/win/core.c | ||
| 2808 | +++ b/deps/uv/src/win/core.c | ||
| 2809 | @@ -255,8 +255,8 @@ int uv_loop_init(uv_loop_t* loop) { | ||
| 2810 | loop->time = 0; | ||
| 2811 | uv_update_time(loop); | ||
| 2812 | |||
| 2813 | - uv__queue_init(&loop->wq); | ||
| 2814 | - uv__queue_init(&loop->handle_queue); | ||
| 2815 | + QUEUE_INIT(&loop->wq); | ||
| 2816 | + QUEUE_INIT(&loop->handle_queue); | ||
| 2817 | loop->active_reqs.count = 0; | ||
| 2818 | loop->active_handles = 0; | ||
| 2819 | |||
| 2820 | @@ -358,7 +358,7 @@ void uv__loop_close(uv_loop_t* loop) { | ||
| 2821 | } | ||
| 2822 | |||
| 2823 | uv_mutex_lock(&loop->wq_mutex); | ||
| 2824 | - assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!"); | ||
| 2825 | + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); | ||
| 2826 | assert(!uv__has_active_reqs(loop)); | ||
| 2827 | uv_mutex_unlock(&loop->wq_mutex); | ||
| 2828 | uv_mutex_destroy(&loop->wq_mutex); | ||
| 2829 | diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c | ||
| 2830 | index fc209c54f47..deb9438d689 100644 | ||
| 2831 | --- a/deps/uv/src/win/fs.c | ||
| 2832 | +++ b/deps/uv/src/win/fs.c | ||
| 2833 | @@ -144,97 +144,26 @@ void uv__fs_init(void) { | ||
| 2834 | } | ||
| 2835 | |||
| 2836 | |||
| 2837 | -static int32_t fs__decode_wtf8_char(const char** input) { | ||
| 2838 | - uint32_t code_point; | ||
| 2839 | - uint8_t b1; | ||
| 2840 | - uint8_t b2; | ||
| 2841 | - uint8_t b3; | ||
| 2842 | - uint8_t b4; | ||
| 2843 | - | ||
| 2844 | - b1 = **input; | ||
| 2845 | - if (b1 <= 0x7F) | ||
| 2846 | - return b1; /* ASCII code point */ | ||
| 2847 | - if (b1 < 0xC2) | ||
| 2848 | - return -1; /* invalid: continuation byte */ | ||
| 2849 | - code_point = b1; | ||
| 2850 | - | ||
| 2851 | - b2 = *++*input; | ||
| 2852 | - if ((b2 & 0xC0) != 0x80) | ||
| 2853 | - return -1; /* invalid: not a continuation byte */ | ||
| 2854 | - code_point = (code_point << 6) | (b2 & 0x3F); | ||
| 2855 | - if (b1 <= 0xDF) | ||
| 2856 | - return 0x7FF & code_point; /* two-byte character */ | ||
| 2857 | - | ||
| 2858 | - b3 = *++*input; | ||
| 2859 | - if ((b3 & 0xC0) != 0x80) | ||
| 2860 | - return -1; /* invalid: not a continuation byte */ | ||
| 2861 | - code_point = (code_point << 6) | (b3 & 0x3F); | ||
| 2862 | - if (b1 <= 0xEF) | ||
| 2863 | - return 0xFFFF & code_point; /* three-byte character */ | ||
| 2864 | - | ||
| 2865 | - b4 = *++*input; | ||
| 2866 | - if ((b4 & 0xC0) != 0x80) | ||
| 2867 | - return -1; /* invalid: not a continuation byte */ | ||
| 2868 | - code_point = (code_point << 6) | (b4 & 0x3F); | ||
| 2869 | - if (b1 <= 0xF4) | ||
| 2870 | - if (code_point <= 0x10FFFF) | ||
| 2871 | - return code_point; /* four-byte character */ | ||
| 2872 | - | ||
| 2873 | - /* code point too large */ | ||
| 2874 | - return -1; | ||
| 2875 | -} | ||
| 2876 | - | ||
| 2877 | - | ||
| 2878 | -static ssize_t fs__get_length_wtf8(const char* source_ptr) { | ||
| 2879 | - size_t w_target_len = 0; | ||
| 2880 | - int32_t code_point; | ||
| 2881 | - | ||
| 2882 | - do { | ||
| 2883 | - code_point = fs__decode_wtf8_char(&source_ptr); | ||
| 2884 | - if (code_point < 0) | ||
| 2885 | - return -1; | ||
| 2886 | - if (code_point > 0xFFFF) | ||
| 2887 | - w_target_len++; | ||
| 2888 | - w_target_len++; | ||
| 2889 | - } while (*source_ptr++); | ||
| 2890 | - return w_target_len; | ||
| 2891 | -} | ||
| 2892 | - | ||
| 2893 | - | ||
| 2894 | -static void fs__wtf8_to_wide(const char* source_ptr, WCHAR* w_target) { | ||
| 2895 | - int32_t code_point; | ||
| 2896 | - | ||
| 2897 | - do { | ||
| 2898 | - code_point = fs__decode_wtf8_char(&source_ptr); | ||
| 2899 | - /* fs__get_length_wtf8 should have been called and checked first. */ | ||
| 2900 | - assert(code_point >= 0); | ||
| 2901 | - if (code_point > 0x10000) { | ||
| 2902 | - assert(code_point < 0x10FFFF); | ||
| 2903 | - *w_target++ = (((code_point - 0x10000) >> 10) + 0xD800); | ||
| 2904 | - *w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00; | ||
| 2905 | - } else { | ||
| 2906 | - *w_target++ = code_point; | ||
| 2907 | - } | ||
| 2908 | - } while (*source_ptr++); | ||
| 2909 | -} | ||
| 2910 | - | ||
| 2911 | - | ||
| 2912 | INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||
| 2913 | const char* new_path, const int copy_path) { | ||
| 2914 | - WCHAR* buf; | ||
| 2915 | - WCHAR* pos; | ||
| 2916 | - size_t buf_sz = 0; | ||
| 2917 | - size_t path_len = 0; | ||
| 2918 | - ssize_t pathw_len = 0; | ||
| 2919 | - ssize_t new_pathw_len = 0; | ||
| 2920 | + char* buf; | ||
| 2921 | + char* pos; | ||
| 2922 | + ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0; | ||
| 2923 | |||
| 2924 | /* new_path can only be set if path is also set. */ | ||
| 2925 | assert(new_path == NULL || path != NULL); | ||
| 2926 | |||
| 2927 | if (path != NULL) { | ||
| 2928 | - pathw_len = fs__get_length_wtf8(path); | ||
| 2929 | - if (pathw_len < 0) | ||
| 2930 | - return ERROR_INVALID_NAME; | ||
| 2931 | + pathw_len = MultiByteToWideChar(CP_UTF8, | ||
| 2932 | + 0, | ||
| 2933 | + path, | ||
| 2934 | + -1, | ||
| 2935 | + NULL, | ||
| 2936 | + 0); | ||
| 2937 | + if (pathw_len == 0) { | ||
| 2938 | + return GetLastError(); | ||
| 2939 | + } | ||
| 2940 | + | ||
| 2941 | buf_sz += pathw_len * sizeof(WCHAR); | ||
| 2942 | } | ||
| 2943 | |||
| 2944 | @@ -244,9 +173,16 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||
| 2945 | } | ||
| 2946 | |||
| 2947 | if (new_path != NULL) { | ||
| 2948 | - new_pathw_len = fs__get_length_wtf8(new_path); | ||
| 2949 | - if (new_pathw_len < 0) | ||
| 2950 | - return ERROR_INVALID_NAME; | ||
| 2951 | + new_pathw_len = MultiByteToWideChar(CP_UTF8, | ||
| 2952 | + 0, | ||
| 2953 | + new_path, | ||
| 2954 | + -1, | ||
| 2955 | + NULL, | ||
| 2956 | + 0); | ||
| 2957 | + if (new_pathw_len == 0) { | ||
| 2958 | + return GetLastError(); | ||
| 2959 | + } | ||
| 2960 | + | ||
| 2961 | buf_sz += new_pathw_len * sizeof(WCHAR); | ||
| 2962 | } | ||
| 2963 | |||
| 2964 | @@ -258,7 +194,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||
| 2965 | return 0; | ||
| 2966 | } | ||
| 2967 | |||
| 2968 | - buf = uv__malloc(buf_sz); | ||
| 2969 | + buf = (char*) uv__malloc(buf_sz); | ||
| 2970 | if (buf == NULL) { | ||
| 2971 | return ERROR_OUTOFMEMORY; | ||
| 2972 | } | ||
| 2973 | @@ -266,17 +202,29 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||
| 2974 | pos = buf; | ||
| 2975 | |||
| 2976 | if (path != NULL) { | ||
| 2977 | - fs__wtf8_to_wide(path, pos); | ||
| 2978 | - req->file.pathw = pos; | ||
| 2979 | - pos += pathw_len; | ||
| 2980 | + DWORD r = MultiByteToWideChar(CP_UTF8, | ||
| 2981 | + 0, | ||
| 2982 | + path, | ||
| 2983 | + -1, | ||
| 2984 | + (WCHAR*) pos, | ||
| 2985 | + pathw_len); | ||
| 2986 | + assert(r == (DWORD) pathw_len); | ||
| 2987 | + req->file.pathw = (WCHAR*) pos; | ||
| 2988 | + pos += r * sizeof(WCHAR); | ||
| 2989 | } else { | ||
| 2990 | req->file.pathw = NULL; | ||
| 2991 | } | ||
| 2992 | |||
| 2993 | if (new_path != NULL) { | ||
| 2994 | - fs__wtf8_to_wide(new_path, pos); | ||
| 2995 | - req->fs.info.new_pathw = pos; | ||
| 2996 | - pos += new_pathw_len; | ||
| 2997 | + DWORD r = MultiByteToWideChar(CP_UTF8, | ||
| 2998 | + 0, | ||
| 2999 | + new_path, | ||
| 3000 | + -1, | ||
| 3001 | + (WCHAR*) pos, | ||
| 3002 | + new_pathw_len); | ||
| 3003 | + assert(r == (DWORD) new_pathw_len); | ||
| 3004 | + req->fs.info.new_pathw = (WCHAR*) pos; | ||
| 3005 | + pos += r * sizeof(WCHAR); | ||
| 3006 | } else { | ||
| 3007 | req->fs.info.new_pathw = NULL; | ||
| 3008 | } | ||
| 3009 | @@ -284,8 +232,8 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||
| 3010 | req->path = path; | ||
| 3011 | if (path != NULL && copy_path) { | ||
| 3012 | memcpy(pos, path, path_len); | ||
| 3013 | - assert(path_len == buf_sz - (pos - buf) * sizeof(WCHAR)); | ||
| 3014 | - req->path = (char*) pos; | ||
| 3015 | + assert(path_len == buf_sz - (pos - buf)); | ||
| 3016 | + req->path = pos; | ||
| 3017 | } | ||
| 3018 | |||
| 3019 | req->flags |= UV_FS_FREE_PATHS; | ||
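The restored fs__capture_path above uses the classic Win32 size-then-convert idiom: call MultiByteToWideChar once with a NULL buffer to learn the required length, then again to perform the conversion. A self-contained sketch of that idiom (hypothetical helper name, simplified error handling):

#include <windows.h>
#include <stdlib.h>

/* Convert a NUL-terminated UTF-8 string to a freshly allocated wide string.
 * Returns NULL on failure; GetLastError() then holds the reason. */
static WCHAR* utf8_to_wide(const char* utf8) {
  WCHAR* w;
  int wlen;

  /* First call: NULL target => returns required length in WCHARs,
   * including the terminating L'\0' because cbMultiByte is -1. */
  wlen = MultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0);
  if (wlen == 0)
    return NULL;

  w = malloc(wlen * sizeof(WCHAR));
  if (w == NULL)
    return NULL;

  /* Second call: same arguments plus a real buffer => does the conversion. */
  MultiByteToWideChar(CP_UTF8, 0, utf8, -1, w, wlen);
  return w;
}

Note the behavioral trade-off this revert accepts: without MB_ERR_INVALID_CHARS, MultiByteToWideChar substitutes invalid byte sequences instead of failing, and it cannot round-trip unpaired surrogates — the case the removed WTF-8 helpers existed to preserve.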
| 3020 | @@ -311,115 +259,57 @@ INLINE static void uv__fs_req_init(uv_loop_t* loop, uv_fs_t* req, | ||
| 3021 | } | ||
| 3022 | |||
| 3023 | |||
| 3024 | -static int32_t fs__get_surrogate_value(const WCHAR* w_source_ptr, | ||
| 3025 | - size_t w_source_len) { | ||
| 3026 | - WCHAR u; | ||
| 3027 | - WCHAR next; | ||
| 3028 | - | ||
| 3029 | - u = w_source_ptr[0]; | ||
| 3030 | - if (u >= 0xD800 && u <= 0xDBFF && w_source_len > 1) { | ||
| 3031 | - next = w_source_ptr[1]; | ||
| 3032 | - if (next >= 0xDC00 && next <= 0xDFFF) | ||
| 3033 | - return 0x10000 + ((u - 0xD800) << 10) + (next - 0xDC00); | ||
| 3034 | - } | ||
| 3035 | - return u; | ||
| 3036 | -} | ||
| 3037 | - | ||
| 3038 | - | ||
| 3039 | -static size_t fs__get_length_wide(const WCHAR* w_source_ptr, | ||
| 3040 | - size_t w_source_len) { | ||
| 3041 | - size_t target_len; | ||
| 3042 | - int32_t code_point; | ||
| 3043 | +static int fs__wide_to_utf8(WCHAR* w_source_ptr, | ||
| 3044 | + DWORD w_source_len, | ||
| 3045 | + char** target_ptr, | ||
| 3046 | + uint64_t* target_len_ptr) { | ||
| 3047 | + int r; | ||
| 3048 | + int target_len; | ||
| 3049 | + char* target; | ||
| 3050 | + target_len = WideCharToMultiByte(CP_UTF8, | ||
| 3051 | + 0, | ||
| 3052 | + w_source_ptr, | ||
| 3053 | + w_source_len, | ||
| 3054 | + NULL, | ||
| 3055 | + 0, | ||
| 3056 | + NULL, | ||
| 3057 | + NULL); | ||
| 3058 | |||
| 3059 | - target_len = 0; | ||
| 3060 | - for (; w_source_len; w_source_len--, w_source_ptr++) { | ||
| 3061 | - code_point = fs__get_surrogate_value(w_source_ptr, w_source_len); | ||
| 3062 | - /* Can be invalid UTF-8 but must be valid WTF-8. */ | ||
| 3063 | - assert(code_point >= 0); | ||
| 3064 | - if (code_point < 0x80) | ||
| 3065 | - target_len += 1; | ||
| 3066 | - else if (code_point < 0x800) | ||
| 3067 | - target_len += 2; | ||
| 3068 | - else if (code_point < 0x10000) | ||
| 3069 | - target_len += 3; | ||
| 3070 | - else { | ||
| 3071 | - target_len += 4; | ||
| 3072 | - w_source_ptr++; | ||
| 3073 | - w_source_len--; | ||
| 3074 | - } | ||
| 3075 | + if (target_len == 0) { | ||
| 3076 | + return -1; | ||
| 3077 | } | ||
| 3078 | - return target_len; | ||
| 3079 | -} | ||
| 3080 | |||
| 3081 | - | ||
| 3082 | -static int fs__wide_to_wtf8(WCHAR* w_source_ptr, | ||
| 3083 | - size_t w_source_len, | ||
| 3084 | - char** target_ptr, | ||
| 3085 | - size_t* target_len_ptr) { | ||
| 3086 | - size_t target_len; | ||
| 3087 | - char* target; | ||
| 3088 | - int32_t code_point; | ||
| 3089 | - | ||
| 3090 | - /* If *target_ptr is provided, then *target_len_ptr must be its length | ||
| 3091 | - * (excluding space for null), otherwise we will compute the target_len_ptr | ||
| 3092 | - * length and may return a new allocation in *target_ptr if target_ptr is | ||
| 3093 | - * provided. */ | ||
| 3094 | - if (target_ptr == NULL || *target_ptr == NULL) { | ||
| 3095 | - target_len = fs__get_length_wide(w_source_ptr, w_source_len); | ||
| 3096 | - if (target_len_ptr != NULL) | ||
| 3097 | - *target_len_ptr = target_len; | ||
| 3098 | - } else { | ||
| 3099 | - target_len = *target_len_ptr; | ||
| 3100 | + if (target_len_ptr != NULL) { | ||
| 3101 | + *target_len_ptr = target_len; | ||
| 3102 | } | ||
| 3103 | |||
| 3104 | - if (target_ptr == NULL) | ||
| 3105 | + if (target_ptr == NULL) { | ||
| 3106 | return 0; | ||
| 3107 | - | ||
| 3108 | - if (*target_ptr == NULL) { | ||
| 3109 | - target = uv__malloc(target_len + 1); | ||
| 3110 | - if (target == NULL) { | ||
| 3111 | - SetLastError(ERROR_OUTOFMEMORY); | ||
| 3112 | - return -1; | ||
| 3113 | - } | ||
| 3114 | - *target_ptr = target; | ||
| 3115 | - } else { | ||
| 3116 | - target = *target_ptr; | ||
| 3117 | - } | ||
| 3118 | - | ||
| 3119 | - for (; w_source_len; w_source_len--, w_source_ptr++) { | ||
| 3120 | - code_point = fs__get_surrogate_value(w_source_ptr, w_source_len); | ||
| 3121 | - /* Can be invalid UTF-8 but must be valid WTF-8. */ | ||
| 3122 | - assert(code_point >= 0); | ||
| 3123 | - | ||
| 3124 | - if (code_point < 0x80) { | ||
| 3125 | - *target++ = code_point; | ||
| 3126 | - } else if (code_point < 0x800) { | ||
| 3127 | - *target++ = 0xC0 | (code_point >> 6); | ||
| 3128 | - *target++ = 0x80 | (code_point & 0x3F); | ||
| 3129 | - } else if (code_point < 0x10000) { | ||
| 3130 | - *target++ = 0xE0 | (code_point >> 12); | ||
| 3131 | - *target++ = 0x80 | ((code_point >> 6) & 0x3F); | ||
| 3132 | - *target++ = 0x80 | (code_point & 0x3F); | ||
| 3133 | - } else { | ||
| 3134 | - *target++ = 0xF0 | (code_point >> 18); | ||
| 3135 | - *target++ = 0x80 | ((code_point >> 12) & 0x3F); | ||
| 3136 | - *target++ = 0x80 | ((code_point >> 6) & 0x3F); | ||
| 3137 | - *target++ = 0x80 | (code_point & 0x3F); | ||
| 3138 | - w_source_ptr++; | ||
| 3139 | - w_source_len--; | ||
| 3140 | - } | ||
| 3141 | } | ||
| 3142 | - assert((size_t) (target - *target_ptr) == target_len); | ||
| 3143 | |||
| 3144 | - *target++ = '\0'; | ||
| 3145 | + target = uv__malloc(target_len + 1); | ||
| 3146 | + if (target == NULL) { | ||
| 3147 | + SetLastError(ERROR_OUTOFMEMORY); | ||
| 3148 | + return -1; | ||
| 3149 | + } | ||
| 3150 | |||
| 3151 | + r = WideCharToMultiByte(CP_UTF8, | ||
| 3152 | + 0, | ||
| 3153 | + w_source_ptr, | ||
| 3154 | + w_source_len, | ||
| 3155 | + target, | ||
| 3156 | + target_len, | ||
| 3157 | + NULL, | ||
| 3158 | + NULL); | ||
| 3159 | + assert(r == target_len); | ||
| 3160 | + target[target_len] = '\0'; | ||
| 3161 | + *target_ptr = target; | ||
| 3162 | return 0; | ||
| 3163 | } | ||
| 3164 | |||
| 3165 | |||
| 3166 | -INLINE static int fs__readlink_handle(HANDLE handle, | ||
| 3167 | - char** target_ptr, | ||
| 3168 | - size_t* target_len_ptr) { | ||
| 3169 | +INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr, | ||
| 3170 | + uint64_t* target_len_ptr) { | ||
| 3171 | char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; | ||
| 3172 | REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer; | ||
| 3173 | WCHAR* w_target; | ||
| 3174 | @@ -549,8 +439,7 @@ INLINE static int fs__readlink_handle(HANDLE handle, | ||
| 3175 | return -1; | ||
| 3176 | } | ||
| 3177 | |||
| 3178 | - assert(target_ptr == NULL || *target_ptr == NULL); | ||
| 3179 | - return fs__wide_to_wtf8(w_target, w_target_len, target_ptr, target_len_ptr); | ||
| 3180 | + return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr); | ||
| 3181 | } | ||
| 3182 | |||
| 3183 | |||
| 3184 | @@ -1540,8 +1429,7 @@ void fs__scandir(uv_fs_t* req) { | ||
| 3185 | uv__dirent_t* dirent; | ||
| 3186 | |||
| 3187 | size_t wchar_len; | ||
| 3188 | - size_t wtf8_len; | ||
| 3189 | - char* wtf8; | ||
| 3190 | + size_t utf8_len; | ||
| 3191 | |||
| 3192 | /* Obtain a pointer to the current directory entry. */ | ||
| 3193 | position += next_entry_offset; | ||
| 3194 | @@ -1568,8 +1456,11 @@ void fs__scandir(uv_fs_t* req) { | ||
| 3195 | info->FileName[1] == L'.') | ||
| 3196 | continue; | ||
| 3197 | |||
| 3198 | - /* Compute the space required to store the filename as WTF-8. */ | ||
| 3199 | - wtf8_len = fs__get_length_wide(&info->FileName[0], wchar_len); | ||
| 3200 | + /* Compute the space required to store the filename as UTF-8. */ | ||
| 3201 | + utf8_len = WideCharToMultiByte( | ||
| 3202 | + CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL); | ||
| 3203 | + if (utf8_len == 0) | ||
| 3204 | + goto win32_error; | ||
| 3205 | |||
| 3206 | /* Resize the dirent array if needed. */ | ||
| 3207 | if (dirents_used >= dirents_size) { | ||
| 3208 | @@ -1589,17 +1480,26 @@ void fs__scandir(uv_fs_t* req) { | ||
| 3209 | * includes room for the first character of the filename, but `utf8_len` | ||
| 3210 | * doesn't count the NULL terminator at this point. | ||
| 3211 | */ | ||
| 3212 | - dirent = uv__malloc(sizeof *dirent + wtf8_len); | ||
| 3213 | + dirent = uv__malloc(sizeof *dirent + utf8_len); | ||
| 3214 | if (dirent == NULL) | ||
| 3215 | goto out_of_memory_error; | ||
| 3216 | |||
| 3217 | dirents[dirents_used++] = dirent; | ||
| 3218 | |||
| 3219 | /* Convert file name to UTF-8. */ | ||
| 3220 | - wtf8 = &dirent->d_name[0]; | ||
| 3221 | - if (fs__wide_to_wtf8(&info->FileName[0], wchar_len, &wtf8, &wtf8_len) == -1) | ||
| 3222 | + if (WideCharToMultiByte(CP_UTF8, | ||
| 3223 | + 0, | ||
| 3224 | + &info->FileName[0], | ||
| 3225 | + wchar_len, | ||
| 3226 | + &dirent->d_name[0], | ||
| 3227 | + utf8_len, | ||
| 3228 | + NULL, | ||
| 3229 | + NULL) == 0) | ||
| 3230 | goto win32_error; | ||
| 3231 | |||
| 3232 | + /* Add a null terminator to the filename. */ | ||
| 3233 | + dirent->d_name[utf8_len] = '\0'; | ||
| 3234 | + | ||
| 3235 | /* Fill out the type field. */ | ||
| 3236 | if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE) | ||
| 3237 | dirent->d_type = UV__DT_CHAR; | ||
| 3238 | @@ -1808,7 +1708,6 @@ void fs__closedir(uv_fs_t* req) { | ||
| 3239 | |||
| 3240 | INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, | ||
| 3241 | int do_lstat) { | ||
| 3242 | - size_t target_length = 0; | ||
| 3243 | FILE_FS_DEVICE_INFORMATION device_info; | ||
| 3244 | FILE_ALL_INFORMATION file_info; | ||
| 3245 | FILE_FS_VOLUME_INFORMATION volume_info; | ||
| 3246 | @@ -1904,10 +1803,9 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, | ||
| 3247 | * to be treated as a regular file. The higher level lstat function will | ||
| 3248 | * detect this failure and retry without do_lstat if appropriate. | ||
| 3249 | */ | ||
| 3250 | - if (fs__readlink_handle(handle, NULL, &target_length) != 0) | ||
| 3251 | + if (fs__readlink_handle(handle, NULL, &statbuf->st_size) != 0) | ||
| 3252 | return -1; | ||
| 3253 | statbuf->st_mode |= S_IFLNK; | ||
| 3254 | - statbuf->st_size = target_length; | ||
| 3255 | } | ||
| 3256 | |||
| 3257 | if (statbuf->st_mode == 0) { | ||
| 3258 | @@ -2763,7 +2661,6 @@ static void fs__readlink(uv_fs_t* req) { | ||
| 3259 | return; | ||
| 3260 | } | ||
| 3261 | |||
| 3262 | - assert(req->ptr == NULL); | ||
| 3263 | if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) { | ||
| 3264 | DWORD error = GetLastError(); | ||
| 3265 | SET_REQ_WIN32_ERROR(req, error); | ||
| 3266 | @@ -2823,8 +2720,7 @@ static ssize_t fs__realpath_handle(HANDLE handle, char** realpath_ptr) { | ||
| 3267 | return -1; | ||
| 3268 | } | ||
| 3269 | |||
| 3270 | - assert(*realpath_ptr == NULL); | ||
| 3271 | - r = fs__wide_to_wtf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL); | ||
| 3272 | + r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL); | ||
| 3273 | uv__free(w_realpath_buf); | ||
| 3274 | return r; | ||
| 3275 | } | ||
| 3276 | @@ -2844,7 +2740,6 @@ static void fs__realpath(uv_fs_t* req) { | ||
| 3277 | return; | ||
| 3278 | } | ||
| 3279 | |||
| 3280 | - assert(req->ptr == NULL); | ||
| 3281 | if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) { | ||
| 3282 | CloseHandle(handle); | ||
| 3283 | SET_REQ_WIN32_ERROR(req, GetLastError()); | ||
| 3284 | diff --git a/deps/uv/src/win/handle-inl.h b/deps/uv/src/win/handle-inl.h | ||
| 3285 | index 4722e85790a..5c843c241ef 100644 | ||
| 3286 | --- a/deps/uv/src/win/handle-inl.h | ||
| 3287 | +++ b/deps/uv/src/win/handle-inl.h | ||
| 3288 | @@ -75,7 +75,7 @@ | ||
| 3289 | |||
| 3290 | #define uv__handle_close(handle) \ | ||
| 3291 | do { \ | ||
| 3292 | - uv__queue_remove(&(handle)->handle_queue); \ | ||
| 3293 | + QUEUE_REMOVE(&(handle)->handle_queue); \ | ||
| 3294 | uv__active_handle_rm((uv_handle_t*) (handle)); \ | ||
| 3295 | \ | ||
| 3296 | (handle)->flags |= UV_HANDLE_CLOSED; \ | ||
| 3297 | diff --git a/deps/uv/src/win/pipe.c b/deps/uv/src/win/pipe.c | ||
| 3298 | index f0cac382256..5e4276387ac 100644 | ||
| 3299 | --- a/deps/uv/src/win/pipe.c | ||
| 3300 | +++ b/deps/uv/src/win/pipe.c | ||
| 3301 | @@ -55,7 +55,7 @@ static const int pipe_prefix_len = sizeof(pipe_prefix) - 1; | ||
| 3302 | typedef struct { | ||
| 3303 | uv__ipc_socket_xfer_type_t xfer_type; | ||
| 3304 | uv__ipc_socket_xfer_info_t xfer_info; | ||
| 3305 | - struct uv__queue member; | ||
| 3306 | + QUEUE member; | ||
| 3307 | } uv__ipc_xfer_queue_item_t; | ||
| 3308 | |||
| 3309 | /* IPC frame header flags. */ | ||
| 3310 | @@ -111,7 +111,7 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { | ||
| 3311 | handle->name = NULL; | ||
| 3312 | handle->pipe.conn.ipc_remote_pid = 0; | ||
| 3313 | handle->pipe.conn.ipc_data_frame.payload_remaining = 0; | ||
| 3314 | - uv__queue_init(&handle->pipe.conn.ipc_xfer_queue); | ||
| 3315 | + QUEUE_INIT(&handle->pipe.conn.ipc_xfer_queue); | ||
| 3316 | handle->pipe.conn.ipc_xfer_queue_length = 0; | ||
| 3317 | handle->ipc = ipc; | ||
| 3318 | handle->pipe.conn.non_overlapped_writes_tail = NULL; | ||
| 3319 | @@ -637,13 +637,13 @@ void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) { | ||
| 3320 | |||
| 3321 | if (handle->flags & UV_HANDLE_CONNECTION) { | ||
| 3322 | /* Free pending sockets */ | ||
| 3323 | - while (!uv__queue_empty(&handle->pipe.conn.ipc_xfer_queue)) { | ||
| 3324 | - struct uv__queue* q; | ||
| 3325 | + while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) { | ||
| 3326 | + QUEUE* q; | ||
| 3327 | SOCKET socket; | ||
| 3328 | |||
| 3329 | - q = uv__queue_head(&handle->pipe.conn.ipc_xfer_queue); | ||
| 3330 | - uv__queue_remove(q); | ||
| 3331 | - xfer_queue_item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member); | ||
| 3332 | + q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue); | ||
| 3333 | + QUEUE_REMOVE(q); | ||
| 3334 | + xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member); | ||
| 3335 | |||
| 3336 | /* Materialize socket and close it */ | ||
| 3337 | socket = WSASocketW(FROM_PROTOCOL_INFO, | ||
| 3338 | @@ -1124,20 +1124,20 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) { | ||
| 3339 | uv_loop_t* loop = server->loop; | ||
| 3340 | uv_pipe_t* pipe_client; | ||
| 3341 | uv_pipe_accept_t* req; | ||
| 3342 | - struct uv__queue* q; | ||
| 3343 | + QUEUE* q; | ||
| 3344 | uv__ipc_xfer_queue_item_t* item; | ||
| 3345 | int err; | ||
| 3346 | |||
| 3347 | if (server->ipc) { | ||
| 3348 | - if (uv__queue_empty(&server->pipe.conn.ipc_xfer_queue)) { | ||
| 3349 | + if (QUEUE_EMPTY(&server->pipe.conn.ipc_xfer_queue)) { | ||
| 3350 | /* No valid pending sockets. */ | ||
| 3351 | return WSAEWOULDBLOCK; | ||
| 3352 | } | ||
| 3353 | |||
| 3354 | - q = uv__queue_head(&server->pipe.conn.ipc_xfer_queue); | ||
| 3355 | - uv__queue_remove(q); | ||
| 3356 | + q = QUEUE_HEAD(&server->pipe.conn.ipc_xfer_queue); | ||
| 3357 | + QUEUE_REMOVE(q); | ||
| 3358 | server->pipe.conn.ipc_xfer_queue_length--; | ||
| 3359 | - item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member); | ||
| 3360 | + item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member); | ||
| 3361 | |||
| 3362 | err = uv__tcp_xfer_import( | ||
| 3363 | (uv_tcp_t*) client, item->xfer_type, &item->xfer_info); | ||
| 3364 | @@ -1891,7 +1891,7 @@ static void uv__pipe_queue_ipc_xfer_info( | ||
| 3365 | item->xfer_type = xfer_type; | ||
| 3366 | item->xfer_info = *xfer_info; | ||
| 3367 | |||
| 3368 | - uv__queue_insert_tail(&handle->pipe.conn.ipc_xfer_queue, &item->member); | ||
| 3369 | + QUEUE_INSERT_TAIL(&handle->pipe.conn.ipc_xfer_queue, &item->member); | ||
| 3370 | handle->pipe.conn.ipc_xfer_queue_length++; | ||
| 3371 | } | ||
| 3372 | |||
| 3373 | diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c | ||
| 3374 | index 187f36e2a61..6b282e0b501 100644 | ||
| 3375 | --- a/deps/uv/src/win/tcp.c | ||
| 3376 | +++ b/deps/uv/src/win/tcp.c | ||
| 3377 | @@ -175,14 +175,14 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) { | ||
| 3378 | sock = socket(domain, SOCK_STREAM, 0); | ||
| 3379 | if (sock == INVALID_SOCKET) { | ||
| 3380 | err = WSAGetLastError(); | ||
| 3381 | - uv__queue_remove(&handle->handle_queue); | ||
| 3382 | + QUEUE_REMOVE(&handle->handle_queue); | ||
| 3383 | return uv_translate_sys_error(err); | ||
| 3384 | } | ||
| 3385 | |||
| 3386 | err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0); | ||
| 3387 | if (err) { | ||
| 3388 | closesocket(sock); | ||
| 3389 | - uv__queue_remove(&handle->handle_queue); | ||
| 3390 | + QUEUE_REMOVE(&handle->handle_queue); | ||
| 3391 | return uv_translate_sys_error(err); | ||
| 3392 | } | ||
| 3393 | |||
| 3394 | diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c | ||
| 3395 | index eab53842d4f..8a982d1907d 100644 | ||
| 3396 | --- a/deps/uv/src/win/udp.c | ||
| 3397 | +++ b/deps/uv/src/win/udp.c | ||
| 3398 | @@ -146,14 +146,14 @@ int uv__udp_init_ex(uv_loop_t* loop, | ||
| 3399 | sock = socket(domain, SOCK_DGRAM, 0); | ||
| 3400 | if (sock == INVALID_SOCKET) { | ||
| 3401 | err = WSAGetLastError(); | ||
| 3402 | - uv__queue_remove(&handle->handle_queue); | ||
| 3403 | + QUEUE_REMOVE(&handle->handle_queue); | ||
| 3404 | return uv_translate_sys_error(err); | ||
| 3405 | } | ||
| 3406 | |||
| 3407 | err = uv__udp_set_socket(handle->loop, handle, sock, domain); | ||
| 3408 | if (err) { | ||
| 3409 | closesocket(sock); | ||
| 3410 | - uv__queue_remove(&handle->handle_queue); | ||
| 3411 | + QUEUE_REMOVE(&handle->handle_queue); | ||
| 3412 | return uv_translate_sys_error(err); | ||
| 3413 | } | ||
| 3414 | } | ||
diff --git a/meta-oe/recipes-devtools/nodejs/nodejs/0002-Revert-io_uring-changes-from-libuv-1.45.0.patch b/meta-oe/recipes-devtools/nodejs/nodejs/0002-Revert-io_uring-changes-from-libuv-1.45.0.patch
new file mode 100644
index 0000000000..77cd53b759
--- /dev/null
+++ b/meta-oe/recipes-devtools/nodejs/nodejs/0002-Revert-io_uring-changes-from-libuv-1.45.0.patch

| @@ -0,0 +1,1803 @@ | |||
| 1 | From 6d2ef4c8ba2304ee4941a6719b3ad9bd63e415a9 Mon Sep 17 00:00:00 2001 | ||
| 2 | From: Martin Jansa <martin.jansa@gmail.com> | ||
| 3 | Date: Wed, 18 Oct 2023 21:09:44 +0200 | ||
| 4 | Subject: [PATCH] Revert io_uring changes from libuv-1.45.0 | ||
| 5 | |||
| 6 | This reverts https://github.com/libuv/libuv/pull/3952/commits/26c79a942b92573a1388c0ee8a6ad4397f009318 | ||
| 7 | |||
| 8 | Included in nodejs-20.3.0 with the libuv upgrade to 1.45.0 in: | ||
| 9 | https://github.com/nodejs/node/commit/bfcb3d1d9a876f399013d326bd65804f9eda77e4 | ||
| 10 | |||
| 11 | Reverted libuv commits: | ||
| 12 | Revert "linux: fix WRITEV with lots of bufs using io_uring (#4004)" | ||
| 13 | This reverts commit ef6a9a624df0a00687037474025a3608472f722a. | ||
| 14 | Revert "linux: work around EOWNERDEAD io_uring kernel bug (#4002)" | ||
| 15 | This reverts commit d23a20f62cc50b9fd7694992263f1d296d8f5cb4. | ||
| 16 | Revert "unix: handle CQ overflow in iou ring (#3991)" | ||
| 17 | This reverts commit 30fc896cc1b5822e9f1eb462587fe4b368a6215c. | ||
| 18 | Revert "unix: constrained_memory should return UINT64_MAX (#3753)" | ||
| 19 | This reverts commit 6ad347fae4520f39520d34bd7c7f5ddafab13a69. | ||
| 20 | Revert "linux: use io_uring to batch epoll_ctl calls (#3979)" | ||
| 21 | This reverts commit 6e073ef5daf93b708a654008959b823b58029e88. | ||
| 22 | Revert "linux: fix logic bug in sqe ring space check (#3980)" | ||
| 23 | This reverts commit f27208224084fc972b9d2802486d97ef31b51a39. | ||
| 24 | Revert "src: fix events/events_waiting metrics counter (#3957)" | ||
| 25 | This reverts commit e02642cf3b768b2c58a41f97fa38507e032ae415. | ||
| 26 | Revert "linux: remove bug workaround for obsolete kernels (#3965)" | ||
| 27 | This reverts commit 1c935a34454167b23f8eef7f0f63d7119f0de747. | ||
| 28 | Revert "linux: add IORING_OP_CLOSE support (#3964)" | ||
| 29 | This reverts commit dfae365f844e127621128a76bce7165e3f99a8d9. | ||
| 30 | Revert "linux: add IORING_OP_OPENAT support (#3963)" | ||
| 31 | This reverts commit 5ca5e475bb1711e65323ef1594a31818e5a1a9eb. | ||
| 32 | Revert "linux: fix academic valgrind warning (#3960)" | ||
| 33 | This reverts commit a7ff759ca1deacb2e0e6ae3c2d3dce91cc637dfe. | ||
| 34 | Revert "linux: introduce io_uring support (#3952)" | ||
| 35 | This reverts commit d2c31f429b87b476a7f1344d145dad4752a406d4. | ||
| 36 | |||
| 37 | Dropped deps/uv/docs and deps/uv/test changes as these dirs aren't included | ||
| 38 | in nodejs tarballs. | ||
| 39 | |||
| 40 | Signed-off-by: Martin Jansa <martin.jansa@gmail.com> | ||
| 41 | --- | ||
| 42 | Upstream-Status: Inappropriate [OE specific] | ||
| 43 | |||
| 44 | deps/uv/docs/src/fs.rst | 6 - | ||
| 45 | deps/uv/docs/src/misc.rst | 5 +- | ||
| 46 | deps/uv/src/threadpool.c | 21 - | ||
| 47 | deps/uv/src/unix/aix.c | 11 +- | ||
| 48 | deps/uv/src/unix/fs.c | 57 +- | ||
| 49 | deps/uv/src/unix/internal.h | 23 - | ||
| 50 | deps/uv/src/unix/kqueue.c | 10 +- | ||
| 51 | deps/uv/src/unix/linux.c | 998 +++----------------------- | ||
| 52 | deps/uv/src/unix/os390.c | 11 +- | ||
| 53 | deps/uv/src/unix/posix-poll.c | 11 +- | ||
| 54 | deps/uv/src/uv-common.h | 28 - | ||
| 55 | deps/uv/src/win/core.c | 20 +- | ||
| 56 | deps/uv/test/test-fs.c | 33 - | ||
| 57 | deps/uv/test/test-list.h | 4 - | ||
| 58 | deps/uv/test/test-metrics.c | 151 ---- | ||
| 59 | deps/uv/test/test-threadpool-cancel.c | 34 +- | ||
| 60 | 16 files changed, 138 insertions(+), 1285 deletions(-) | ||
| 61 | |||
| 62 | diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c | ||
| 63 | index 51962bf0021..a3da53026f9 100644 | ||
| 64 | --- a/deps/uv/src/threadpool.c | ||
| 65 | +++ b/deps/uv/src/threadpool.c | ||
| 66 | @@ -275,13 +275,9 @@ void uv__work_submit(uv_loop_t* loop, | ||
| 67 | } | ||
| 68 | |||
| 69 | |||
| 70 | -/* TODO(bnoordhuis) teach libuv how to cancel file operations | ||
| 71 | - * that go through io_uring instead of the thread pool. | ||
| 72 | - */ | ||
| 73 | static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { | ||
| 74 | int cancelled; | ||
| 75 | |||
| 76 | - uv_once(&once, init_once); /* Ensure |mutex| is initialized. */ | ||
| 77 | uv_mutex_lock(&mutex); | ||
| 78 | uv_mutex_lock(&w->loop->wq_mutex); | ||
| 79 | |||
| 80 | @@ -311,15 +307,12 @@ void uv__work_done(uv_async_t* handle) { | ||
| 81 | QUEUE* q; | ||
| 82 | QUEUE wq; | ||
| 83 | int err; | ||
| 84 | - int nevents; | ||
| 85 | |||
| 86 | loop = container_of(handle, uv_loop_t, wq_async); | ||
| 87 | uv_mutex_lock(&loop->wq_mutex); | ||
| 88 | QUEUE_MOVE(&loop->wq, &wq); | ||
| 89 | uv_mutex_unlock(&loop->wq_mutex); | ||
| 90 | |||
| 91 | - nevents = 0; | ||
| 92 | - | ||
| 93 | while (!QUEUE_EMPTY(&wq)) { | ||
| 94 | q = QUEUE_HEAD(&wq); | ||
| 95 | QUEUE_REMOVE(q); | ||
| 96 | @@ -327,20 +320,6 @@ void uv__work_done(uv_async_t* handle) { | ||
| 97 | w = container_of(q, struct uv__work, wq); | ||
| 98 | err = (w->work == uv__cancelled) ? UV_ECANCELED : 0; | ||
| 99 | w->done(w, err); | ||
| 100 | - nevents++; | ||
| 101 | - } | ||
| 102 | - | ||
| 103 | - /* This check accomplishes 2 things: | ||
| 104 | - * 1. Even if the queue was empty, the call to uv__work_done() should count | ||
| 105 | - * as an event. Which will have been added by the event loop when | ||
| 106 | - * calling this callback. | ||
| 107 | - * 2. Prevents accidental wrap around in case nevents == 0 events == 0. | ||
| 108 | - */ | ||
| 109 | - if (nevents > 1) { | ||
| 110 | - /* Subtract 1 to counter the call to uv__work_done(). */ | ||
| 111 | - uv__metrics_inc_events(loop, nevents - 1); | ||
| 112 | - if (uv__get_internal_fields(loop)->current_timeout == 0) | ||
| 113 | - uv__metrics_inc_events_waiting(loop, nevents - 1); | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | diff --git a/deps/uv/src/unix/aix.c b/deps/uv/src/unix/aix.c | ||
| 118 | index f1afbed49ec..b855282ebc8 100644 | ||
| 119 | --- a/deps/uv/src/unix/aix.c | ||
| 120 | +++ b/deps/uv/src/unix/aix.c | ||
| 121 | @@ -131,7 +131,6 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) { | ||
| 122 | |||
| 123 | |||
| 124 | void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 125 | - uv__loop_internal_fields_t* lfields; | ||
| 126 | struct pollfd events[1024]; | ||
| 127 | struct pollfd pqry; | ||
| 128 | struct pollfd* pe; | ||
| 129 | @@ -155,8 +154,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 130 | return; | ||
| 131 | } | ||
| 132 | |||
| 133 | - lfields = uv__get_internal_fields(loop); | ||
| 134 | - | ||
| 135 | while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 136 | q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 137 | QUEUE_REMOVE(q); | ||
| 138 | @@ -220,7 +217,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 139 | base = loop->time; | ||
| 140 | count = 48; /* Benchmarks suggest this gives the best throughput. */ | ||
| 141 | |||
| 142 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 143 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 144 | reset_timeout = 1; | ||
| 145 | user_timeout = timeout; | ||
| 146 | timeout = 0; | ||
| 147 | @@ -235,12 +232,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 148 | if (timeout != 0) | ||
| 149 | uv__metrics_set_provider_entry_time(loop); | ||
| 150 | |||
| 151 | - /* Store the current timeout in a location that's globally accessible so | ||
| 152 | - * other locations like uv__work_done() can determine whether the queue | ||
| 153 | - * of events in the callback were waiting when poll was called. | ||
| 154 | - */ | ||
| 155 | - lfields->current_timeout = timeout; | ||
| 156 | - | ||
| 157 | nfds = pollset_poll(loop->backend_fd, | ||
| 158 | events, | ||
| 159 | ARRAY_SIZE(events), | ||
| 160 | diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c | ||
| 161 | index 00d385c24b7..c696a8d5500 100644 | ||
| 162 | --- a/deps/uv/src/unix/fs.c | ||
| 163 | +++ b/deps/uv/src/unix/fs.c | ||
| 164 | @@ -61,11 +61,10 @@ | ||
| 165 | #endif | ||
| 166 | |||
| 167 | #if defined(__linux__) | ||
| 168 | -# include <sys/sendfile.h> | ||
| 169 | -# include <sys/utsname.h> | ||
| 170 | +# include "sys/utsname.h" | ||
| 171 | #endif | ||
| 172 | |||
| 173 | -#if defined(__sun) | ||
| 174 | +#if defined(__linux__) || defined(__sun) | ||
| 175 | # include <sys/sendfile.h> | ||
| 176 | # include <sys/sysmacros.h> | ||
| 177 | #endif | ||
| 178 | @@ -1554,7 +1553,26 @@ static int uv__fs_statx(int fd, | ||
| 179 | return UV_ENOSYS; | ||
| 180 | } | ||
| 181 | |||
| 182 | - uv__statx_to_stat(&statxbuf, buf); | ||
| 183 | + buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor); | ||
| 184 | + buf->st_mode = statxbuf.stx_mode; | ||
| 185 | + buf->st_nlink = statxbuf.stx_nlink; | ||
| 186 | + buf->st_uid = statxbuf.stx_uid; | ||
| 187 | + buf->st_gid = statxbuf.stx_gid; | ||
| 188 | + buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor); | ||
| 189 | + buf->st_ino = statxbuf.stx_ino; | ||
| 190 | + buf->st_size = statxbuf.stx_size; | ||
| 191 | + buf->st_blksize = statxbuf.stx_blksize; | ||
| 192 | + buf->st_blocks = statxbuf.stx_blocks; | ||
| 193 | + buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec; | ||
| 194 | + buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec; | ||
| 195 | + buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec; | ||
| 196 | + buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec; | ||
| 197 | + buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec; | ||
| 198 | + buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec; | ||
| 199 | + buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec; | ||
| 200 | + buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec; | ||
| 201 | + buf->st_flags = 0; | ||
| 202 | + buf->st_gen = 0; | ||
| 203 | |||
| 204 | return 0; | ||
| 205 | #else | ||
| 206 | @@ -1798,9 +1816,6 @@ int uv_fs_chown(uv_loop_t* loop, | ||
| 207 | int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 208 | INIT(CLOSE); | ||
| 209 | req->file = file; | ||
| 210 | - if (cb != NULL) | ||
| 211 | - if (uv__iou_fs_close(loop, req)) | ||
| 212 | - return 0; | ||
| 213 | POST; | ||
| 214 | } | ||
| 215 | |||
| 216 | @@ -1848,9 +1863,6 @@ int uv_fs_lchown(uv_loop_t* loop, | ||
| 217 | int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 218 | INIT(FDATASYNC); | ||
| 219 | req->file = file; | ||
| 220 | - if (cb != NULL) | ||
| 221 | - if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1)) | ||
| 222 | - return 0; | ||
| 223 | POST; | ||
| 224 | } | ||
| 225 | |||
| 226 | @@ -1858,9 +1870,6 @@ int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 227 | int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 228 | INIT(FSTAT); | ||
| 229 | req->file = file; | ||
| 230 | - if (cb != NULL) | ||
| 231 | - if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0)) | ||
| 232 | - return 0; | ||
| 233 | POST; | ||
| 234 | } | ||
| 235 | |||
| 236 | @@ -1868,9 +1877,6 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 237 | int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { | ||
| 238 | INIT(FSYNC); | ||
| 239 | req->file = file; | ||
| 240 | - if (cb != NULL) | ||
| 241 | - if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0)) | ||
| 242 | - return 0; | ||
| 243 | POST; | ||
| 244 | } | ||
| 245 | |||
| 246 | @@ -1917,9 +1923,6 @@ int uv_fs_lutime(uv_loop_t* loop, | ||
| 247 | int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { | ||
| 248 | INIT(LSTAT); | ||
| 249 | PATH; | ||
| 250 | - if (cb != NULL) | ||
| 251 | - if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1)) | ||
| 252 | - return 0; | ||
| 253 | POST; | ||
| 254 | } | ||
| 255 | |||
| 256 | @@ -1981,9 +1984,6 @@ int uv_fs_open(uv_loop_t* loop, | ||
| 257 | PATH; | ||
| 258 | req->flags = flags; | ||
| 259 | req->mode = mode; | ||
| 260 | - if (cb != NULL) | ||
| 261 | - if (uv__iou_fs_open(loop, req)) | ||
| 262 | - return 0; | ||
| 263 | POST; | ||
| 264 | } | ||
| 265 | |||
| 266 | @@ -2012,11 +2012,6 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, | ||
| 267 | memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); | ||
| 268 | |||
| 269 | req->off = off; | ||
| 270 | - | ||
| 271 | - if (cb != NULL) | ||
| 272 | - if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1)) | ||
| 273 | - return 0; | ||
| 274 | - | ||
| 275 | POST; | ||
| 276 | } | ||
| 277 | |||
| 278 | @@ -2124,9 +2119,6 @@ int uv_fs_sendfile(uv_loop_t* loop, | ||
| 279 | int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { | ||
| 280 | INIT(STAT); | ||
| 281 | PATH; | ||
| 282 | - if (cb != NULL) | ||
| 283 | - if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0)) | ||
| 284 | - return 0; | ||
| 285 | POST; | ||
| 286 | } | ||
| 287 | |||
| 288 | @@ -2190,11 +2182,6 @@ int uv_fs_write(uv_loop_t* loop, | ||
| 289 | memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); | ||
| 290 | |||
| 291 | req->off = off; | ||
| 292 | - | ||
| 293 | - if (cb != NULL) | ||
| 294 | - if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0)) | ||
| 295 | - return 0; | ||
| 296 | - | ||
| 297 | POST; | ||
| 298 | } | ||
| 299 | |||
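The uv_fs_* hunks above all remove the same two-line dispatch: an asynchronous request first tries the io_uring submission path and only falls through to POST (the thread pool) when that path declines it. A condensed, compilable sketch of the shape being reverted, with uv_fs_read as the model — the stand-in typedefs and uv__fs_post are hypothetical; the real uv__iou_* declarations are in the internal.h hunk just below:

#include <stddef.h>

/* Stand-ins so the sketch compiles on its own; the real types live in uv.h
 * and the real uv__iou_* declarations in deps/uv/src/unix/internal.h. */
typedef struct uv_loop_s uv_loop_t;
typedef struct uv_fs_s uv_fs_t;
typedef void (*uv_fs_cb)(uv_fs_t* req);

extern int uv__iou_fs_read_or_write(uv_loop_t* loop, uv_fs_t* req, int is_read);
extern int uv__fs_post(uv_loop_t* loop, uv_fs_t* req);  /* hypothetical POST */

/* Shape of the dispatch this revert removes: try io_uring first for async
 * calls, fall back to the thread pool otherwise. */
int fs_read_dispatch(uv_loop_t* loop, uv_fs_t* req, uv_fs_cb cb) {
  if (cb != NULL)                           /* async request */
    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
      return 0;                             /* queued on the submission ring */
  return uv__fs_post(loop, req);            /* classic thread-pool path */
}

On non-Linux builds the uv__iou_* names are #defined to 0 (next hunk), so the same call sites compile everywhere and always take the thread-pool branch.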
| 300 | diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h | ||
| 301 | index 6c5822e6a0d..d439ae6dd3d 100644 | ||
| 302 | --- a/deps/uv/src/unix/internal.h | ||
| 303 | +++ b/deps/uv/src/unix/internal.h | ||
| 304 | @@ -329,28 +329,6 @@ int uv__random_getentropy(void* buf, size_t buflen); | ||
| 305 | int uv__random_readpath(const char* path, void* buf, size_t buflen); | ||
| 306 | int uv__random_sysctl(void* buf, size_t buflen); | ||
| 307 | |||
| 308 | -/* io_uring */ | ||
| 309 | -#ifdef __linux__ | ||
| 310 | -int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req); | ||
| 311 | -int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||
| 312 | - uv_fs_t* req, | ||
| 313 | - uint32_t fsync_flags); | ||
| 314 | -int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req); | ||
| 315 | -int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||
| 316 | - uv_fs_t* req, | ||
| 317 | - int is_read); | ||
| 318 | -int uv__iou_fs_statx(uv_loop_t* loop, | ||
| 319 | - uv_fs_t* req, | ||
| 320 | - int is_fstat, | ||
| 321 | - int is_lstat); | ||
| 322 | -#else | ||
| 323 | -#define uv__iou_fs_close(loop, req) 0 | ||
| 324 | -#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0 | ||
| 325 | -#define uv__iou_fs_open(loop, req) 0 | ||
| 326 | -#define uv__iou_fs_read_or_write(loop, req, is_read) 0 | ||
| 327 | -#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0 | ||
| 328 | -#endif | ||
| 329 | - | ||
| 330 | #if defined(__APPLE__) | ||
| 331 | int uv___stream_fd(const uv_stream_t* handle); | ||
| 332 | #define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle))) | ||
| 333 | @@ -427,7 +405,6 @@ int uv__statx(int dirfd, | ||
| 334 | int flags, | ||
| 335 | unsigned int mask, | ||
| 336 | struct uv__statx* statxbuf); | ||
| 337 | -void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf); | ||
| 338 | ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags); | ||
| 339 | #endif | ||
| 340 | |||
| 341 | diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c | ||
| 342 | index 82916d65933..deb486bae7a 100644 | ||
| 343 | --- a/deps/uv/src/unix/kqueue.c | ||
| 344 | +++ b/deps/uv/src/unix/kqueue.c | ||
| 345 | @@ -127,7 +127,6 @@ static void uv__kqueue_delete(int kqfd, const struct kevent *ev) { | ||
| 346 | |||
| 347 | |||
| 348 | void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 349 | - uv__loop_internal_fields_t* lfields; | ||
| 350 | struct kevent events[1024]; | ||
| 351 | struct kevent* ev; | ||
| 352 | struct timespec spec; | ||
| 353 | @@ -156,7 +155,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 354 | return; | ||
| 355 | } | ||
| 356 | |||
| 357 | - lfields = uv__get_internal_fields(loop); | ||
| 358 | nevents = 0; | ||
| 359 | |||
| 360 | while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 361 | @@ -224,7 +222,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 362 | base = loop->time; | ||
| 363 | count = 48; /* Benchmarks suggest this gives the best throughput. */ | ||
| 364 | |||
| 365 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 366 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 367 | reset_timeout = 1; | ||
| 368 | user_timeout = timeout; | ||
| 369 | timeout = 0; | ||
| 370 | @@ -247,12 +245,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 371 | if (pset != NULL) | ||
| 372 | pthread_sigmask(SIG_BLOCK, pset, NULL); | ||
| 373 | |||
| 374 | - /* Store the current timeout in a location that's globally accessible so | ||
| 375 | - * other locations like uv__work_done() can determine whether the queue | ||
| 376 | - * of events in the callback were waiting when poll was called. | ||
| 377 | - */ | ||
| 378 | - lfields->current_timeout = timeout; | ||
| 379 | - | ||
| 380 | nfds = kevent(loop->backend_fd, | ||
| 381 | events, | ||
| 382 | nevents, | ||
| 383 | diff --git a/deps/uv/src/unix/linux.c b/deps/uv/src/unix/linux.c | ||
| 384 | index 5f84ad0eea3..343e37f2527 100644 | ||
| 385 | --- a/deps/uv/src/unix/linux.c | ||
| 386 | +++ b/deps/uv/src/unix/linux.c | ||
| 387 | @@ -27,8 +27,6 @@ | ||
| 388 | #include "internal.h" | ||
| 389 | |||
| 390 | #include <inttypes.h> | ||
| 391 | -#include <stdatomic.h> | ||
| 392 | -#include <stddef.h> /* offsetof */ | ||
| 393 | #include <stdint.h> | ||
| 394 | #include <stdio.h> | ||
| 395 | #include <stdlib.h> | ||
| 396 | @@ -40,29 +38,15 @@ | ||
| 397 | #include <net/if.h> | ||
| 398 | #include <sys/epoll.h> | ||
| 399 | #include <sys/inotify.h> | ||
| 400 | -#include <sys/mman.h> | ||
| 401 | #include <sys/param.h> | ||
| 402 | #include <sys/prctl.h> | ||
| 403 | #include <sys/stat.h> | ||
| 404 | #include <sys/syscall.h> | ||
| 405 | #include <sys/sysinfo.h> | ||
| 406 | -#include <sys/sysmacros.h> | ||
| 407 | #include <sys/types.h> | ||
| 408 | #include <time.h> | ||
| 409 | #include <unistd.h> | ||
| 410 | |||
| 411 | -#ifndef __NR_io_uring_setup | ||
| 412 | -# define __NR_io_uring_setup 425 | ||
| 413 | -#endif | ||
| 414 | - | ||
| 415 | -#ifndef __NR_io_uring_enter | ||
| 416 | -# define __NR_io_uring_enter 426 | ||
| 417 | -#endif | ||
| 418 | - | ||
| 419 | -#ifndef __NR_io_uring_register | ||
| 420 | -# define __NR_io_uring_register 427 | ||
| 421 | -#endif | ||
| 422 | - | ||
| 423 | #ifndef __NR_copy_file_range | ||
| 424 | # if defined(__x86_64__) | ||
| 425 | # define __NR_copy_file_range 326 | ||
| 426 | @@ -132,129 +116,6 @@ | ||
| 427 | # include <netpacket/packet.h> | ||
| 428 | #endif /* HAVE_IFADDRS_H */ | ||
| 429 | |||
| 430 | -enum { | ||
| 431 | - UV__IORING_SETUP_SQPOLL = 2u, | ||
| 432 | -}; | ||
| 433 | - | ||
| 434 | -enum { | ||
| 435 | - UV__IORING_FEAT_SINGLE_MMAP = 1u, | ||
| 436 | - UV__IORING_FEAT_NODROP = 2u, | ||
| 437 | - UV__IORING_FEAT_RSRC_TAGS = 1024u, /* linux v5.13 */ | ||
| 438 | -}; | ||
| 439 | - | ||
| 440 | -enum { | ||
| 441 | - UV__IORING_OP_READV = 1, | ||
| 442 | - UV__IORING_OP_WRITEV = 2, | ||
| 443 | - UV__IORING_OP_FSYNC = 3, | ||
| 444 | - UV__IORING_OP_OPENAT = 18, | ||
| 445 | - UV__IORING_OP_CLOSE = 19, | ||
| 446 | - UV__IORING_OP_STATX = 21, | ||
| 447 | - UV__IORING_OP_EPOLL_CTL = 29, | ||
| 448 | -}; | ||
| 449 | - | ||
| 450 | -enum { | ||
| 451 | - UV__IORING_ENTER_GETEVENTS = 1u, | ||
| 452 | - UV__IORING_ENTER_SQ_WAKEUP = 2u, | ||
| 453 | -}; | ||
| 454 | - | ||
| 455 | -enum { | ||
| 456 | - UV__IORING_SQ_NEED_WAKEUP = 1u, | ||
| 457 | - UV__IORING_SQ_CQ_OVERFLOW = 2u, | ||
| 458 | -}; | ||
| 459 | - | ||
| 460 | -struct uv__io_cqring_offsets { | ||
| 461 | - uint32_t head; | ||
| 462 | - uint32_t tail; | ||
| 463 | - uint32_t ring_mask; | ||
| 464 | - uint32_t ring_entries; | ||
| 465 | - uint32_t overflow; | ||
| 466 | - uint32_t cqes; | ||
| 467 | - uint64_t reserved0; | ||
| 468 | - uint64_t reserved1; | ||
| 469 | -}; | ||
| 470 | - | ||
| 471 | -STATIC_ASSERT(40 == sizeof(struct uv__io_cqring_offsets)); | ||
| 472 | - | ||
| 473 | -struct uv__io_sqring_offsets { | ||
| 474 | - uint32_t head; | ||
| 475 | - uint32_t tail; | ||
| 476 | - uint32_t ring_mask; | ||
| 477 | - uint32_t ring_entries; | ||
| 478 | - uint32_t flags; | ||
| 479 | - uint32_t dropped; | ||
| 480 | - uint32_t array; | ||
| 481 | - uint32_t reserved0; | ||
| 482 | - uint64_t reserved1; | ||
| 483 | -}; | ||
| 484 | - | ||
| 485 | -STATIC_ASSERT(40 == sizeof(struct uv__io_sqring_offsets)); | ||
| 486 | - | ||
| 487 | -struct uv__io_uring_cqe { | ||
| 488 | - uint64_t user_data; | ||
| 489 | - int32_t res; | ||
| 490 | - uint32_t flags; | ||
| 491 | -}; | ||
| 492 | - | ||
| 493 | -STATIC_ASSERT(16 == sizeof(struct uv__io_uring_cqe)); | ||
| 494 | - | ||
| 495 | -struct uv__io_uring_sqe { | ||
| 496 | - uint8_t opcode; | ||
| 497 | - uint8_t flags; | ||
| 498 | - uint16_t ioprio; | ||
| 499 | - int32_t fd; | ||
| 500 | - union { | ||
| 501 | - uint64_t off; | ||
| 502 | - uint64_t addr2; | ||
| 503 | - }; | ||
| 504 | - union { | ||
| 505 | - uint64_t addr; | ||
| 506 | - }; | ||
| 507 | - uint32_t len; | ||
| 508 | - union { | ||
| 509 | - uint32_t rw_flags; | ||
| 510 | - uint32_t fsync_flags; | ||
| 511 | - uint32_t open_flags; | ||
| 512 | - uint32_t statx_flags; | ||
| 513 | - }; | ||
| 514 | - uint64_t user_data; | ||
| 515 | - union { | ||
| 516 | - uint16_t buf_index; | ||
| 517 | - uint64_t pad[3]; | ||
| 518 | - }; | ||
| 519 | -}; | ||
| 520 | - | ||
| 521 | -STATIC_ASSERT(64 == sizeof(struct uv__io_uring_sqe)); | ||
| 522 | -STATIC_ASSERT(0 == offsetof(struct uv__io_uring_sqe, opcode)); | ||
| 523 | -STATIC_ASSERT(1 == offsetof(struct uv__io_uring_sqe, flags)); | ||
| 524 | -STATIC_ASSERT(2 == offsetof(struct uv__io_uring_sqe, ioprio)); | ||
| 525 | -STATIC_ASSERT(4 == offsetof(struct uv__io_uring_sqe, fd)); | ||
| 526 | -STATIC_ASSERT(8 == offsetof(struct uv__io_uring_sqe, off)); | ||
| 527 | -STATIC_ASSERT(16 == offsetof(struct uv__io_uring_sqe, addr)); | ||
| 528 | -STATIC_ASSERT(24 == offsetof(struct uv__io_uring_sqe, len)); | ||
| 529 | -STATIC_ASSERT(28 == offsetof(struct uv__io_uring_sqe, rw_flags)); | ||
| 530 | -STATIC_ASSERT(32 == offsetof(struct uv__io_uring_sqe, user_data)); | ||
| 531 | -STATIC_ASSERT(40 == offsetof(struct uv__io_uring_sqe, buf_index)); | ||
| 532 | - | ||
| 533 | -struct uv__io_uring_params { | ||
| 534 | - uint32_t sq_entries; | ||
| 535 | - uint32_t cq_entries; | ||
| 536 | - uint32_t flags; | ||
| 537 | - uint32_t sq_thread_cpu; | ||
| 538 | - uint32_t sq_thread_idle; | ||
| 539 | - uint32_t features; | ||
| 540 | - uint32_t reserved[4]; | ||
| 541 | - struct uv__io_sqring_offsets sq_off; /* 40 bytes */ | ||
| 542 | - struct uv__io_cqring_offsets cq_off; /* 40 bytes */ | ||
| 543 | -}; | ||
| 544 | - | ||
| 545 | -STATIC_ASSERT(40 + 40 + 40 == sizeof(struct uv__io_uring_params)); | ||
| 546 | -STATIC_ASSERT(40 == offsetof(struct uv__io_uring_params, sq_off)); | ||
| 547 | -STATIC_ASSERT(80 == offsetof(struct uv__io_uring_params, cq_off)); | ||
| 548 | - | ||
| 549 | -STATIC_ASSERT(EPOLL_CTL_ADD < 4); | ||
| 550 | -STATIC_ASSERT(EPOLL_CTL_DEL < 4); | ||
| 551 | -STATIC_ASSERT(EPOLL_CTL_MOD < 4); | ||
| 552 | - | ||
| 553 | struct watcher_list { | ||
| 554 | RB_ENTRY(watcher_list) entry; | ||
| 555 | QUEUE watchers; | ||
| 556 | @@ -276,17 +137,6 @@ static int compare_watchers(const struct watcher_list* a, | ||
| 557 | static void maybe_free_watcher_list(struct watcher_list* w, | ||
| 558 | uv_loop_t* loop); | ||
| 559 | |||
| 560 | -static void uv__epoll_ctl_flush(int epollfd, | ||
| 561 | - struct uv__iou* ctl, | ||
| 562 | - struct epoll_event (*events)[256]); | ||
| 563 | - | ||
| 564 | -static void uv__epoll_ctl_prep(int epollfd, | ||
| 565 | - struct uv__iou* ctl, | ||
| 566 | - struct epoll_event (*events)[256], | ||
| 567 | - int op, | ||
| 568 | - int fd, | ||
| 569 | - struct epoll_event* e); | ||
| 570 | - | ||
| 571 | RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) | ||
| 572 | |||
| 573 | |||
| 574 | @@ -356,187 +206,7 @@ ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) { | ||
| 575 | } | ||
| 576 | |||
| 577 | |||
| 578 | -int uv__io_uring_setup(int entries, struct uv__io_uring_params* params) { | ||
| 579 | - return syscall(__NR_io_uring_setup, entries, params); | ||
| 580 | -} | ||
| 581 | - | ||
| 582 | - | ||
| 583 | -int uv__io_uring_enter(int fd, | ||
| 584 | - unsigned to_submit, | ||
| 585 | - unsigned min_complete, | ||
| 586 | - unsigned flags) { | ||
| 587 | - /* io_uring_enter used to take a sigset_t but it's unused | ||
| 588 | - * in newer kernels unless IORING_ENTER_EXT_ARG is set, | ||
| 589 | - * in which case it takes a struct io_uring_getevents_arg. | ||
| 590 | - */ | ||
| 591 | - return syscall(__NR_io_uring_enter, | ||
| 592 | - fd, | ||
| 593 | - to_submit, | ||
| 594 | - min_complete, | ||
| 595 | - flags, | ||
| 596 | - NULL, | ||
| 597 | - 0L); | ||
| 598 | -} | ||
| 599 | - | ||
| 600 | - | ||
| 601 | -int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) { | ||
| 602 | - return syscall(__NR_io_uring_register, fd, opcode, arg, nargs); | ||
| 603 | -} | ||
| 604 | - | ||
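
The three thin wrappers deleted above invoke the io_uring syscalls directly rather than linking liburing. A self-contained sketch of that approach, assuming uapi headers new enough to provide <linux/io_uring.h> and the __NR_io_uring_* numbers, with error handling reduced to perror:

    #define _GNU_SOURCE
    #include <linux/io_uring.h>   /* struct io_uring_params */
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>      /* __NR_io_uring_setup */
    #include <unistd.h>

    static int ring_setup(unsigned entries, struct io_uring_params* p) {
      return (int) syscall(__NR_io_uring_setup, entries, p);
    }

    int main(void) {
      struct io_uring_params p;
      int fd;

      memset(&p, 0, sizeof(p));
      fd = ring_setup(4, &p);   /* kernel hands back an O_CLOEXEC fd */
      if (fd == -1) {
        perror("io_uring_setup");
        return 1;
      }
      printf("ring fd %d, sq_entries %u\n", fd, p.sq_entries);
      close(fd);
      return 0;
    }
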
| 605 | - | ||
| 606 | -static int uv__use_io_uring(void) { | ||
| 607 | - /* Ternary: unknown=0, yes=1, no=-1 */ | ||
| 608 | - static _Atomic int use_io_uring; | ||
| 609 | - char* val; | ||
| 610 | - int use; | ||
| 611 | - | ||
| 612 | - use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); | ||
| 613 | - | ||
| 614 | - if (use == 0) { | ||
| 615 | - val = getenv("UV_USE_IO_URING"); | ||
| 616 | - use = val == NULL || atoi(val) ? 1 : -1; | ||
| 617 | - atomic_store_explicit(&use_io_uring, use, memory_order_relaxed); | ||
| 618 | - } | ||
| 619 | - | ||
| 620 | - return use > 0; | ||
| 621 | -} | ||
| 622 | - | ||
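
uv__use_io_uring(), removed above, caches an environment lookup in a tri-state atomic (0 = unknown, 1 = yes, -1 = no) so getenv() effectively runs once; relaxed ordering suffices because concurrent callers would all compute the same answer. A standalone sketch of the pattern, with a hypothetical knob name in place of UV_USE_IO_URING:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int use_feature(void) {
      static _Atomic int cached;   /* 0 = unknown, 1 = yes, -1 = no */
      int use;

      use = atomic_load_explicit(&cached, memory_order_relaxed);
      if (use == 0) {
        const char* val = getenv("MY_FEATURE");     /* hypothetical knob */
        use = (val == NULL || atoi(val)) ? 1 : -1;  /* default: enabled */
        atomic_store_explicit(&cached, use, memory_order_relaxed);
      }
      return use > 0;
    }

    int main(void) {
      printf("feature enabled: %d\n", use_feature());
      return 0;
    }
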
| 623 | - | ||
| 624 | -static void uv__iou_init(int epollfd, | ||
| 625 | - struct uv__iou* iou, | ||
| 626 | - uint32_t entries, | ||
| 627 | - uint32_t flags) { | ||
| 628 | - struct uv__io_uring_params params; | ||
| 629 | - struct epoll_event e; | ||
| 630 | - size_t cqlen; | ||
| 631 | - size_t sqlen; | ||
| 632 | - size_t maxlen; | ||
| 633 | - size_t sqelen; | ||
| 634 | - uint32_t i; | ||
| 635 | - char* sq; | ||
| 636 | - char* sqe; | ||
| 637 | - int ringfd; | ||
| 638 | - | ||
| 639 | - sq = MAP_FAILED; | ||
| 640 | - sqe = MAP_FAILED; | ||
| 641 | - | ||
| 642 | - if (!uv__use_io_uring()) | ||
| 643 | - return; | ||
| 644 | - | ||
| 645 | - /* SQPOLL required CAP_SYS_NICE until linux v5.12 relaxed that requirement. | ||
| 646 | - * Mostly academic because we check for a v5.13 kernel afterwards anyway. | ||
| 647 | - */ | ||
| 648 | - memset(¶ms, 0, sizeof(params)); | ||
| 649 | - params.flags = flags; | ||
| 650 | - | ||
| 651 | - if (flags & UV__IORING_SETUP_SQPOLL) | ||
| 652 | - params.sq_thread_idle = 10; /* milliseconds */ | ||
| 653 | - | ||
| 654 | - /* Kernel returns a file descriptor with O_CLOEXEC flag set. */ | ||
| 655 | - ringfd = uv__io_uring_setup(entries, ¶ms); | ||
| 656 | - if (ringfd == -1) | ||
| 657 | - return; | ||
| 658 | - | ||
| 659 | - /* IORING_FEAT_RSRC_TAGS is used to detect linux v5.13 but what we're | ||
| 660 | - * actually detecting is whether IORING_OP_STATX works with SQPOLL. | ||
| 661 | - */ | ||
| 662 | - if (!(params.features & UV__IORING_FEAT_RSRC_TAGS)) | ||
| 663 | - goto fail; | ||
| 664 | - | ||
| 665 | - /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. */ | ||
| 666 | - if (!(params.features & UV__IORING_FEAT_SINGLE_MMAP)) | ||
| 667 | - goto fail; | ||
| 668 | - | ||
| 669 | - /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. */ | ||
| 670 | - if (!(params.features & UV__IORING_FEAT_NODROP)) | ||
| 671 | - goto fail; | ||
| 672 | - | ||
| 673 | - sqlen = params.sq_off.array + params.sq_entries * sizeof(uint32_t); | ||
| 674 | - cqlen = | ||
| 675 | - params.cq_off.cqes + params.cq_entries * sizeof(struct uv__io_uring_cqe); | ||
| 676 | - maxlen = sqlen < cqlen ? cqlen : sqlen; | ||
| 677 | - sqelen = params.sq_entries * sizeof(struct uv__io_uring_sqe); | ||
| 678 | - | ||
| 679 | - sq = mmap(0, | ||
| 680 | - maxlen, | ||
| 681 | - PROT_READ | PROT_WRITE, | ||
| 682 | - MAP_SHARED | MAP_POPULATE, | ||
| 683 | - ringfd, | ||
| 684 | - 0); /* IORING_OFF_SQ_RING */ | ||
| 685 | - | ||
| 686 | - sqe = mmap(0, | ||
| 687 | - sqelen, | ||
| 688 | - PROT_READ | PROT_WRITE, | ||
| 689 | - MAP_SHARED | MAP_POPULATE, | ||
| 690 | - ringfd, | ||
| 691 | - 0x10000000ull); /* IORING_OFF_SQES */ | ||
| 692 | - | ||
| 693 | - if (sq == MAP_FAILED || sqe == MAP_FAILED) | ||
| 694 | - goto fail; | ||
| 695 | - | ||
| 696 | - if (flags & UV__IORING_SETUP_SQPOLL) { | ||
| 697 | - /* Only interested in completion events. To get notified when | ||
| 698 | - * the kernel pulls items from the submission ring, add POLLOUT. | ||
| 699 | - */ | ||
| 700 | - memset(&e, 0, sizeof(e)); | ||
| 701 | - e.events = POLLIN; | ||
| 702 | - e.data.fd = ringfd; | ||
| 703 | - | ||
| 704 | - if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ringfd, &e)) | ||
| 705 | - goto fail; | ||
| 706 | - } | ||
| 707 | - | ||
| 708 | - iou->sqhead = (uint32_t*) (sq + params.sq_off.head); | ||
| 709 | - iou->sqtail = (uint32_t*) (sq + params.sq_off.tail); | ||
| 710 | - iou->sqmask = *(uint32_t*) (sq + params.sq_off.ring_mask); | ||
| 711 | - iou->sqarray = (uint32_t*) (sq + params.sq_off.array); | ||
| 712 | - iou->sqflags = (uint32_t*) (sq + params.sq_off.flags); | ||
| 713 | - iou->cqhead = (uint32_t*) (sq + params.cq_off.head); | ||
| 714 | - iou->cqtail = (uint32_t*) (sq + params.cq_off.tail); | ||
| 715 | - iou->cqmask = *(uint32_t*) (sq + params.cq_off.ring_mask); | ||
| 716 | - iou->sq = sq; | ||
| 717 | - iou->cqe = sq + params.cq_off.cqes; | ||
| 718 | - iou->sqe = sqe; | ||
| 719 | - iou->sqlen = sqlen; | ||
| 720 | - iou->cqlen = cqlen; | ||
| 721 | - iou->maxlen = maxlen; | ||
| 722 | - iou->sqelen = sqelen; | ||
| 723 | - iou->ringfd = ringfd; | ||
| 724 | - iou->in_flight = 0; | ||
| 725 | - | ||
| 726 | - for (i = 0; i <= iou->sqmask; i++) | ||
| 727 | - iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */ | ||
| 728 | - | ||
| 729 | - return; | ||
| 730 | - | ||
| 731 | -fail: | ||
| 732 | - if (sq != MAP_FAILED) | ||
| 733 | - munmap(sq, maxlen); | ||
| 734 | - | ||
| 735 | - if (sqe != MAP_FAILED) | ||
| 736 | - munmap(sqe, sqelen); | ||
| 737 | - | ||
| 738 | - uv__close(ringfd); | ||
| 739 | -} | ||
| 740 | - | ||
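
The sizing arithmetic in the uv__iou_init() just removed follows the kernel's single-mmap layout: with IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping, so the ring mmap spans max(sqlen, cqlen), while the SQE array gets its own mapping at IORING_OFF_SQES. A sketch of only that computation, with made-up offsets standing in for what io_uring_setup() would have written into the params:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* Stand-ins for values io_uring_setup() returns; not real output. */
      uint32_t sq_entries = 64, cq_entries = 128;
      uint32_t sq_array_off = 1088, cqes_off = 320;
      size_t sqe_size = 64, cqe_size = 16;   /* fixed by the ABI asserts above */

      size_t sqlen = sq_array_off + sq_entries * sizeof(uint32_t);
      size_t cqlen = cqes_off + cq_entries * cqe_size;
      size_t maxlen = sqlen < cqlen ? cqlen : sqlen;   /* one shared ring mmap */
      size_t sqelen = sq_entries * sqe_size;           /* separate SQE mmap */

      printf("ring mmap %zu bytes, sqe mmap %zu bytes\n", maxlen, sqelen);
      return 0;
    }
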
| 741 | - | ||
| 742 | -static void uv__iou_delete(struct uv__iou* iou) { | ||
| 743 | - if (iou->ringfd != -1) { | ||
| 744 | - munmap(iou->sq, iou->maxlen); | ||
| 745 | - munmap(iou->sqe, iou->sqelen); | ||
| 746 | - uv__close(iou->ringfd); | ||
| 747 | - iou->ringfd = -1; | ||
| 748 | - } | ||
| 749 | -} | ||
| 750 | - | ||
| 751 | - | ||
| 752 | int uv__platform_loop_init(uv_loop_t* loop) { | ||
| 753 | - uv__loop_internal_fields_t* lfields; | ||
| 754 | - | ||
| 755 | - lfields = uv__get_internal_fields(loop); | ||
| 756 | - lfields->ctl.ringfd = -1; | ||
| 757 | - lfields->iou.ringfd = -1; | ||
| 758 | - | ||
| 759 | loop->inotify_watchers = NULL; | ||
| 760 | loop->inotify_fd = -1; | ||
| 761 | loop->backend_fd = epoll_create1(O_CLOEXEC); | ||
| 762 | @@ -544,9 +214,6 @@ int uv__platform_loop_init(uv_loop_t* loop) { | ||
| 763 | if (loop->backend_fd == -1) | ||
| 764 | return UV__ERR(errno); | ||
| 765 | |||
| 766 | - uv__iou_init(loop->backend_fd, &lfields->iou, 64, UV__IORING_SETUP_SQPOLL); | ||
| 767 | - uv__iou_init(loop->backend_fd, &lfields->ctl, 256, 0); | ||
| 768 | - | ||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 772 | @@ -559,8 +226,6 @@ int uv__io_fork(uv_loop_t* loop) { | ||
| 773 | |||
| 774 | uv__close(loop->backend_fd); | ||
| 775 | loop->backend_fd = -1; | ||
| 776 | - | ||
| 777 | - /* TODO(bnoordhuis) Loses items from the submission and completion rings. */ | ||
| 778 | uv__platform_loop_delete(loop); | ||
| 779 | |||
| 780 | err = uv__platform_loop_init(loop); | ||
| 781 | @@ -572,62 +237,42 @@ int uv__io_fork(uv_loop_t* loop) { | ||
| 782 | |||
| 783 | |||
| 784 | void uv__platform_loop_delete(uv_loop_t* loop) { | ||
| 785 | - uv__loop_internal_fields_t* lfields; | ||
| 786 | - | ||
| 787 | - lfields = uv__get_internal_fields(loop); | ||
| 788 | - uv__iou_delete(&lfields->ctl); | ||
| 789 | - uv__iou_delete(&lfields->iou); | ||
| 790 | - | ||
| 791 | - if (loop->inotify_fd != -1) { | ||
| 792 | - uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); | ||
| 793 | - uv__close(loop->inotify_fd); | ||
| 794 | - loop->inotify_fd = -1; | ||
| 795 | - } | ||
| 796 | + if (loop->inotify_fd == -1) return; | ||
| 797 | + uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); | ||
| 798 | + uv__close(loop->inotify_fd); | ||
| 799 | + loop->inotify_fd = -1; | ||
| 800 | } | ||
| 801 | |||
| 802 | |||
| 803 | -struct uv__invalidate { | ||
| 804 | - struct epoll_event (*prep)[256]; | ||
| 805 | - struct epoll_event* events; | ||
| 806 | - int nfds; | ||
| 807 | -}; | ||
| 808 | - | ||
| 809 | - | ||
| 810 | void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { | ||
| 811 | - uv__loop_internal_fields_t* lfields; | ||
| 812 | - struct uv__invalidate* inv; | ||
| 813 | + struct epoll_event* events; | ||
| 814 | struct epoll_event dummy; | ||
| 815 | - int i; | ||
| 816 | + uintptr_t i; | ||
| 817 | + uintptr_t nfds; | ||
| 818 | |||
| 819 | - lfields = uv__get_internal_fields(loop); | ||
| 820 | - inv = lfields->inv; | ||
| 821 | + assert(loop->watchers != NULL); | ||
| 822 | + assert(fd >= 0); | ||
| 823 | |||
| 824 | - /* Invalidate events with same file descriptor */ | ||
| 825 | - if (inv != NULL) | ||
| 826 | - for (i = 0; i < inv->nfds; i++) | ||
| 827 | - if (inv->events[i].data.fd == fd) | ||
| 828 | - inv->events[i].data.fd = -1; | ||
| 829 | + events = (struct epoll_event*) loop->watchers[loop->nwatchers]; | ||
| 830 | + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; | ||
| 831 | + if (events != NULL) | ||
| 832 | + /* Invalidate events with same file descriptor */ | ||
| 833 | + for (i = 0; i < nfds; i++) | ||
| 834 | + if (events[i].data.fd == fd) | ||
| 835 | + events[i].data.fd = -1; | ||
| 836 | |||
| 837 | /* Remove the file descriptor from the epoll. | ||
| 838 | * This avoids a problem where the same file description remains open | ||
| 839 | * in another process, causing repeated junk epoll events. | ||
| 840 | * | ||
| 841 | * We pass in a dummy epoll_event, to work around a bug in old kernels. | ||
| 842 | - * | ||
| 843 | - * Work around a bug in kernels 3.10 to 3.19 where passing a struct that | ||
| 844 | - * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. | ||
| 845 | */ | ||
| 846 | - memset(&dummy, 0, sizeof(dummy)); | ||
| 847 | - | ||
| 848 | - if (inv == NULL) { | ||
| 849 | + if (loop->backend_fd >= 0) { | ||
| 850 | + /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that | ||
| 851 | + * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. | ||
| 852 | + */ | ||
| 853 | + memset(&dummy, 0, sizeof(dummy)); | ||
| 854 | epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy); | ||
| 855 | - } else { | ||
| 856 | - uv__epoll_ctl_prep(loop->backend_fd, | ||
| 857 | - &lfields->ctl, | ||
| 858 | - inv->prep, | ||
| 859 | - EPOLL_CTL_DEL, | ||
| 860 | - fd, | ||
| 861 | - &dummy); | ||
| 862 | } | ||
| 863 | } | ||
| 864 | |||
| 865 | @@ -653,454 +298,27 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) { | ||
| 866 | } | ||
| 867 | |||
| 868 | |||
| 869 | -/* Caller must initialize SQE and call uv__iou_submit(). */ | ||
| 870 | -static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, | ||
| 871 | - uv_loop_t* loop, | ||
| 872 | - uv_fs_t* req) { | ||
| 873 | - struct uv__io_uring_sqe* sqe; | ||
| 874 | - uint32_t head; | ||
| 875 | - uint32_t tail; | ||
| 876 | - uint32_t mask; | ||
| 877 | - uint32_t slot; | ||
| 878 | - | ||
| 879 | - if (iou->ringfd == -1) | ||
| 880 | - return NULL; | ||
| 881 | - | ||
| 882 | - head = atomic_load_explicit((_Atomic uint32_t*) iou->sqhead, | ||
| 883 | - memory_order_acquire); | ||
| 884 | - tail = *iou->sqtail; | ||
| 885 | - mask = iou->sqmask; | ||
| 886 | - | ||
| 887 | - if ((head & mask) == ((tail + 1) & mask)) | ||
| 888 | - return NULL; /* No room in ring buffer. TODO(bnoordhuis) maybe flush it? */ | ||
| 889 | - | ||
| 890 | - slot = tail & mask; | ||
| 891 | - sqe = iou->sqe; | ||
| 892 | - sqe = &sqe[slot]; | ||
| 893 | - memset(sqe, 0, sizeof(*sqe)); | ||
| 894 | - sqe->user_data = (uintptr_t) req; | ||
| 895 | - | ||
| 896 | - /* Pacify uv_cancel(). */ | ||
| 897 | - req->work_req.loop = loop; | ||
| 898 | - req->work_req.work = NULL; | ||
| 899 | - req->work_req.done = NULL; | ||
| 900 | - QUEUE_INIT(&req->work_req.wq); | ||
| 901 | - | ||
| 902 | - uv__req_register(loop, req); | ||
| 903 | - iou->in_flight++; | ||
| 904 | - | ||
| 905 | - return sqe; | ||
| 906 | -} | ||
| 907 | - | ||
| 908 | - | ||
| 909 | -static void uv__iou_submit(struct uv__iou* iou) { | ||
| 910 | - uint32_t flags; | ||
| 911 | - | ||
| 912 | - atomic_store_explicit((_Atomic uint32_t*) iou->sqtail, | ||
| 913 | - *iou->sqtail + 1, | ||
| 914 | - memory_order_release); | ||
| 915 | - | ||
| 916 | - flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, | ||
| 917 | - memory_order_acquire); | ||
| 918 | - | ||
| 919 | - if (flags & UV__IORING_SQ_NEED_WAKEUP) | ||
| 920 | - if (uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_SQ_WAKEUP)) | ||
| 921 | - if (errno != EOWNERDEAD) /* Kernel bug. Harmless, ignore. */ | ||
| 922 | - perror("libuv: io_uring_enter(wakeup)"); /* Can't happen. */ | ||
| 923 | -} | ||
| 924 | - | ||
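
uv__iou_get_sqe() and uv__iou_submit() above are a textbook single-producer ring handoff: claim the slot at tail, fill it, then publish with a release store of tail+1 so the consumer's acquire load sees a fully written SQE; under SQPOLL the producer additionally checks the NEED_WAKEUP flag and kicks the poller thread. A condensed sketch of the publish side against an in-memory stand-in ring (no kernel involved, types abbreviated):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sqe { uint64_t user_data; };   /* stand-in for the 64-byte SQE */

    struct ring {
      _Atomic uint32_t head;   /* consumer advances this */
      _Atomic uint32_t tail;   /* producer advances this */
      uint32_t mask;           /* entries - 1, entries a power of two */
      struct sqe* sqes;
    };

    static struct sqe* get_sqe(struct ring* r) {
      uint32_t head = atomic_load_explicit(&r->head, memory_order_acquire);
      uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
      if ((head & r->mask) == ((tail + 1) & r->mask))
        return NULL;   /* ring full */
      struct sqe* e = &r->sqes[tail & r->mask];
      memset(e, 0, sizeof(*e));
      return e;
    }

    static void submit(struct ring* r) {
      uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
      /* Release: the filled SQE must be visible before the new tail is. */
      atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
    }

    int main(void) {
      struct sqe slots[8] = {0};
      struct ring r = { .mask = 7, .sqes = slots };
      struct sqe* e = get_sqe(&r);
      if (e != NULL) {
        e->user_data = 42;   /* a request pointer in the real code */
        submit(&r);
      }
      printf("tail now %u\n", atomic_load(&r.tail));
      return 0;
    }
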
| 925 | - | ||
| 926 | -int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) { | ||
| 927 | - struct uv__io_uring_sqe* sqe; | ||
| 928 | - struct uv__iou* iou; | ||
| 929 | - | ||
| 930 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 931 | - | ||
| 932 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 933 | - if (sqe == NULL) | ||
| 934 | - return 0; | ||
| 935 | - | ||
| 936 | - sqe->fd = req->file; | ||
| 937 | - sqe->opcode = UV__IORING_OP_CLOSE; | ||
| 938 | - | ||
| 939 | - uv__iou_submit(iou); | ||
| 940 | - | ||
| 941 | - return 1; | ||
| 942 | -} | ||
| 943 | - | ||
| 944 | - | ||
| 945 | -int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||
| 946 | - uv_fs_t* req, | ||
| 947 | - uint32_t fsync_flags) { | ||
| 948 | - struct uv__io_uring_sqe* sqe; | ||
| 949 | - struct uv__iou* iou; | ||
| 950 | - | ||
| 951 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 952 | - | ||
| 953 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 954 | - if (sqe == NULL) | ||
| 955 | - return 0; | ||
| 956 | - | ||
| 957 | - /* Little known fact: setting sqe->off and sqe->len turns | ||
| 958 | - * it into an asynchronous sync_file_range() operation. | ||
| 959 | - */ | ||
| 960 | - sqe->fd = req->file; | ||
| 961 | - sqe->fsync_flags = fsync_flags; | ||
| 962 | - sqe->opcode = UV__IORING_OP_FSYNC; | ||
| 963 | - | ||
| 964 | - uv__iou_submit(iou); | ||
| 965 | - | ||
| 966 | - return 1; | ||
| 967 | -} | ||
| 968 | - | ||
| 969 | - | ||
| 970 | -int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { | ||
| 971 | - struct uv__io_uring_sqe* sqe; | ||
| 972 | - struct uv__iou* iou; | ||
| 973 | - | ||
| 974 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 975 | - | ||
| 976 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 977 | - if (sqe == NULL) | ||
| 978 | - return 0; | ||
| 979 | - | ||
| 980 | - sqe->addr = (uintptr_t) req->path; | ||
| 981 | - sqe->fd = AT_FDCWD; | ||
| 982 | - sqe->len = req->mode; | ||
| 983 | - sqe->opcode = UV__IORING_OP_OPENAT; | ||
| 984 | - sqe->open_flags = req->flags | O_CLOEXEC; | ||
| 985 | - | ||
| 986 | - uv__iou_submit(iou); | ||
| 987 | - | ||
| 988 | - return 1; | ||
| 989 | -} | ||
| 990 | - | ||
| 991 | - | ||
| 992 | -int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||
| 993 | - uv_fs_t* req, | ||
| 994 | - int is_read) { | ||
| 995 | - struct uv__io_uring_sqe* sqe; | ||
| 996 | - struct uv__iou* iou; | ||
| 997 | - | ||
| 998 | - /* For the moment, if iovcnt is greater than IOV_MAX, fallback to the | ||
| 999 | - * threadpool. In the future we might take advantage of IOSQE_IO_LINK. */ | ||
| 1000 | - if (req->nbufs > IOV_MAX) | ||
| 1001 | - return 0; | ||
| 1002 | - | ||
| 1003 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1004 | - | ||
| 1005 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1006 | - if (sqe == NULL) | ||
| 1007 | - return 0; | ||
| 1008 | - | ||
| 1009 | - sqe->addr = (uintptr_t) req->bufs; | ||
| 1010 | - sqe->fd = req->file; | ||
| 1011 | - sqe->len = req->nbufs; | ||
| 1012 | - sqe->off = req->off < 0 ? -1 : req->off; | ||
| 1013 | - sqe->opcode = is_read ? UV__IORING_OP_READV : UV__IORING_OP_WRITEV; | ||
| 1014 | - | ||
| 1015 | - uv__iou_submit(iou); | ||
| 1016 | - | ||
| 1017 | - return 1; | ||
| 1018 | -} | ||
| 1019 | - | ||
| 1020 | - | ||
| 1021 | -int uv__iou_fs_statx(uv_loop_t* loop, | ||
| 1022 | - uv_fs_t* req, | ||
| 1023 | - int is_fstat, | ||
| 1024 | - int is_lstat) { | ||
| 1025 | - struct uv__io_uring_sqe* sqe; | ||
| 1026 | - struct uv__statx* statxbuf; | ||
| 1027 | - struct uv__iou* iou; | ||
| 1028 | - | ||
| 1029 | - statxbuf = uv__malloc(sizeof(*statxbuf)); | ||
| 1030 | - if (statxbuf == NULL) | ||
| 1031 | - return 0; | ||
| 1032 | - | ||
| 1033 | - iou = &uv__get_internal_fields(loop)->iou; | ||
| 1034 | - | ||
| 1035 | - sqe = uv__iou_get_sqe(iou, loop, req); | ||
| 1036 | - if (sqe == NULL) { | ||
| 1037 | - uv__free(statxbuf); | ||
| 1038 | - return 0; | ||
| 1039 | - } | ||
| 1040 | - | ||
| 1041 | - req->ptr = statxbuf; | ||
| 1042 | - | ||
| 1043 | - sqe->addr = (uintptr_t) req->path; | ||
| 1044 | - sqe->addr2 = (uintptr_t) statxbuf; | ||
| 1045 | - sqe->fd = AT_FDCWD; | ||
| 1046 | - sqe->len = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */ | ||
| 1047 | - sqe->opcode = UV__IORING_OP_STATX; | ||
| 1048 | - | ||
| 1049 | - if (is_fstat) { | ||
| 1050 | - sqe->addr = (uintptr_t) ""; | ||
| 1051 | - sqe->fd = req->file; | ||
| 1052 | - sqe->statx_flags |= 0x1000; /* AT_EMPTY_PATH */ | ||
| 1053 | - } | ||
| 1054 | - | ||
| 1055 | - if (is_lstat) | ||
| 1056 | - sqe->statx_flags |= AT_SYMLINK_NOFOLLOW; | ||
| 1057 | - | ||
| 1058 | - uv__iou_submit(iou); | ||
| 1059 | - | ||
| 1060 | - return 1; | ||
| 1061 | -} | ||
| 1062 | - | ||
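
A small decoding note on the statx request above: 0xFFF is exactly STATX_BASIC_STATS (0x7ff) combined with STATX_BTIME (0x800), as the inline comment asserts. A quick check against the uapi macros:

    #include <linux/stat.h>   /* STATX_BASIC_STATS, STATX_BTIME */
    #include <stdio.h>

    int main(void) {
      printf("0x%x\n", STATX_BASIC_STATS | STATX_BTIME);   /* prints 0xfff */
      return 0;
    }
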
| 1063 | - | ||
| 1064 | -void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf) { | ||
| 1065 | - buf->st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor); | ||
| 1066 | - buf->st_mode = statxbuf->stx_mode; | ||
| 1067 | - buf->st_nlink = statxbuf->stx_nlink; | ||
| 1068 | - buf->st_uid = statxbuf->stx_uid; | ||
| 1069 | - buf->st_gid = statxbuf->stx_gid; | ||
| 1070 | - buf->st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor); | ||
| 1071 | - buf->st_ino = statxbuf->stx_ino; | ||
| 1072 | - buf->st_size = statxbuf->stx_size; | ||
| 1073 | - buf->st_blksize = statxbuf->stx_blksize; | ||
| 1074 | - buf->st_blocks = statxbuf->stx_blocks; | ||
| 1075 | - buf->st_atim.tv_sec = statxbuf->stx_atime.tv_sec; | ||
| 1076 | - buf->st_atim.tv_nsec = statxbuf->stx_atime.tv_nsec; | ||
| 1077 | - buf->st_mtim.tv_sec = statxbuf->stx_mtime.tv_sec; | ||
| 1078 | - buf->st_mtim.tv_nsec = statxbuf->stx_mtime.tv_nsec; | ||
| 1079 | - buf->st_ctim.tv_sec = statxbuf->stx_ctime.tv_sec; | ||
| 1080 | - buf->st_ctim.tv_nsec = statxbuf->stx_ctime.tv_nsec; | ||
| 1081 | - buf->st_birthtim.tv_sec = statxbuf->stx_btime.tv_sec; | ||
| 1082 | - buf->st_birthtim.tv_nsec = statxbuf->stx_btime.tv_nsec; | ||
| 1083 | - buf->st_flags = 0; | ||
| 1084 | - buf->st_gen = 0; | ||
| 1085 | -} | ||
| 1086 | - | ||
| 1087 | - | ||
| 1088 | -static void uv__iou_fs_statx_post(uv_fs_t* req) { | ||
| 1089 | - struct uv__statx* statxbuf; | ||
| 1090 | - uv_stat_t* buf; | ||
| 1091 | - | ||
| 1092 | - buf = &req->statbuf; | ||
| 1093 | - statxbuf = req->ptr; | ||
| 1094 | - req->ptr = NULL; | ||
| 1095 | - | ||
| 1096 | - if (req->result == 0) { | ||
| 1097 | - uv__msan_unpoison(statxbuf, sizeof(*statxbuf)); | ||
| 1098 | - uv__statx_to_stat(statxbuf, buf); | ||
| 1099 | - req->ptr = buf; | ||
| 1100 | - } | ||
| 1101 | - | ||
| 1102 | - uv__free(statxbuf); | ||
| 1103 | -} | ||
| 1104 | - | ||
| 1105 | - | ||
| 1106 | -static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { | ||
| 1107 | - struct uv__io_uring_cqe* cqe; | ||
| 1108 | - struct uv__io_uring_cqe* e; | ||
| 1109 | - uv_fs_t* req; | ||
| 1110 | - uint32_t head; | ||
| 1111 | - uint32_t tail; | ||
| 1112 | - uint32_t mask; | ||
| 1113 | - uint32_t i; | ||
| 1114 | - uint32_t flags; | ||
| 1115 | - int nevents; | ||
| 1116 | - int rc; | ||
| 1117 | - | ||
| 1118 | - head = *iou->cqhead; | ||
| 1119 | - tail = atomic_load_explicit((_Atomic uint32_t*) iou->cqtail, | ||
| 1120 | - memory_order_acquire); | ||
| 1121 | - mask = iou->cqmask; | ||
| 1122 | - cqe = iou->cqe; | ||
| 1123 | - nevents = 0; | ||
| 1124 | - | ||
| 1125 | - for (i = head; i != tail; i++) { | ||
| 1126 | - e = &cqe[i & mask]; | ||
| 1127 | - | ||
| 1128 | - req = (uv_fs_t*) (uintptr_t) e->user_data; | ||
| 1129 | - assert(req->type == UV_FS); | ||
| 1130 | - | ||
| 1131 | - uv__req_unregister(loop, req); | ||
| 1132 | - iou->in_flight--; | ||
| 1133 | - | ||
| 1134 | - /* io_uring stores error codes as negative numbers, same as libuv. */ | ||
| 1135 | - req->result = e->res; | ||
| 1136 | - | ||
| 1137 | - switch (req->fs_type) { | ||
| 1138 | - case UV_FS_FSTAT: | ||
| 1139 | - case UV_FS_LSTAT: | ||
| 1140 | - case UV_FS_STAT: | ||
| 1141 | - uv__iou_fs_statx_post(req); | ||
| 1142 | - break; | ||
| 1143 | - default: /* Squelch -Wswitch warnings. */ | ||
| 1144 | - break; | ||
| 1145 | - } | ||
| 1146 | - | ||
| 1147 | - uv__metrics_update_idle_time(loop); | ||
| 1148 | - req->cb(req); | ||
| 1149 | - nevents++; | ||
| 1150 | - } | ||
| 1151 | - | ||
| 1152 | - atomic_store_explicit((_Atomic uint32_t*) iou->cqhead, | ||
| 1153 | - tail, | ||
| 1154 | - memory_order_release); | ||
| 1155 | - | ||
| 1156 | - /* Check whether CQEs overflowed; if so, enter the kernel to make them | ||
| 1157 | - * available. Don't grab them immediately but in the next loop iteration to | ||
| 1158 | - * avoid loop starvation. */ | ||
| 1159 | - flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, | ||
| 1160 | - memory_order_acquire); | ||
| 1161 | - | ||
| 1162 | - if (flags & UV__IORING_SQ_CQ_OVERFLOW) { | ||
| 1163 | - do | ||
| 1164 | - rc = uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_GETEVENTS); | ||
| 1165 | - while (rc == -1 && errno == EINTR); | ||
| 1166 | - | ||
| 1167 | - if (rc < 0) | ||
| 1168 | - perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */ | ||
| 1169 | - } | ||
| 1170 | - | ||
| 1171 | - uv__metrics_inc_events(loop, nevents); | ||
| 1172 | - if (uv__get_internal_fields(loop)->current_timeout == 0) | ||
| 1173 | - uv__metrics_inc_events_waiting(loop, nevents); | ||
| 1174 | -} | ||
| 1175 | - | ||
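
The completion drain removed above mirrors the submit side: acquire-load the tail the kernel publishes, consume entries from head to tail, then release-store the new head so the kernel may reuse those slots. A self-contained sketch with canned entries in place of a live ring:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cqe { uint64_t user_data; int32_t res; uint32_t flags; };

    int main(void) {
      /* Canned completions; a live ring shares this memory with the kernel. */
      struct cqe cq[4] = { { .user_data = 1, .res = 0 },
                           { .user_data = 2, .res = -11 } };  /* -EAGAIN, say */
      _Atomic uint32_t headv = 0;
      _Atomic uint32_t tailv = 2;   /* the kernel release-stores this */
      uint32_t mask = 3;

      uint32_t head = atomic_load_explicit(&headv, memory_order_relaxed);
      uint32_t tail = atomic_load_explicit(&tailv, memory_order_acquire);
      for (uint32_t i = head; i != tail; i++) {
        struct cqe* e = &cq[i & mask];
        /* io_uring reports errors as negative errno values, same as libuv. */
        printf("req %llu -> res %d\n", (unsigned long long) e->user_data, e->res);
      }
      /* Release: we are done reading; the kernel may now overwrite the slots. */
      atomic_store_explicit(&headv, tail, memory_order_release);
      return 0;
    }
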
| 1176 | - | ||
| 1177 | -static void uv__epoll_ctl_prep(int epollfd, | ||
| 1178 | - struct uv__iou* ctl, | ||
| 1179 | - struct epoll_event (*events)[256], | ||
| 1180 | - int op, | ||
| 1181 | - int fd, | ||
| 1182 | - struct epoll_event* e) { | ||
| 1183 | - struct uv__io_uring_sqe* sqe; | ||
| 1184 | - struct epoll_event* pe; | ||
| 1185 | - uint32_t mask; | ||
| 1186 | - uint32_t slot; | ||
| 1187 | - | ||
| 1188 | - if (ctl->ringfd == -1) { | ||
| 1189 | - if (!epoll_ctl(epollfd, op, fd, e)) | ||
| 1190 | - return; | ||
| 1191 | - | ||
| 1192 | - if (op == EPOLL_CTL_DEL) | ||
| 1193 | - return; /* Ignore errors, may be racing with another thread. */ | ||
| 1194 | - | ||
| 1195 | - if (op != EPOLL_CTL_ADD) | ||
| 1196 | - abort(); | ||
| 1197 | - | ||
| 1198 | - if (errno != EEXIST) | ||
| 1199 | - abort(); | ||
| 1200 | - | ||
| 1201 | - /* File descriptor that's been watched before, update event mask. */ | ||
| 1202 | - if (!epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, e)) | ||
| 1203 | - return; | ||
| 1204 | - | ||
| 1205 | - abort(); | ||
| 1206 | - } else { | ||
| 1207 | - mask = ctl->sqmask; | ||
| 1208 | - slot = (*ctl->sqtail)++ & mask; | ||
| 1209 | - | ||
| 1210 | - pe = &(*events)[slot]; | ||
| 1211 | - *pe = *e; | ||
| 1212 | - | ||
| 1213 | - sqe = ctl->sqe; | ||
| 1214 | - sqe = &sqe[slot]; | ||
| 1215 | - | ||
| 1216 | - memset(sqe, 0, sizeof(*sqe)); | ||
| 1217 | - sqe->addr = (uintptr_t) pe; | ||
| 1218 | - sqe->fd = epollfd; | ||
| 1219 | - sqe->len = op; | ||
| 1220 | - sqe->off = fd; | ||
| 1221 | - sqe->opcode = UV__IORING_OP_EPOLL_CTL; | ||
| 1222 | - sqe->user_data = op | slot << 2 | (int64_t) fd << 32; | ||
| 1223 | - | ||
| 1224 | - if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask)) | ||
| 1225 | - uv__epoll_ctl_flush(epollfd, ctl, events); | ||
| 1226 | - } | ||
| 1227 | -} | ||
| 1228 | - | ||
| 1229 | - | ||
| 1230 | -static void uv__epoll_ctl_flush(int epollfd, | ||
| 1231 | - struct uv__iou* ctl, | ||
| 1232 | - struct epoll_event (*events)[256]) { | ||
| 1233 | - struct epoll_event oldevents[256]; | ||
| 1234 | - struct uv__io_uring_cqe* cqe; | ||
| 1235 | - uint32_t oldslot; | ||
| 1236 | - uint32_t slot; | ||
| 1237 | - uint32_t n; | ||
| 1238 | - int fd; | ||
| 1239 | - int op; | ||
| 1240 | - int rc; | ||
| 1241 | - | ||
| 1242 | - STATIC_ASSERT(sizeof(oldevents) == sizeof(*events)); | ||
| 1243 | - assert(ctl->ringfd != -1); | ||
| 1244 | - assert(*ctl->sqhead != *ctl->sqtail); | ||
| 1245 | - | ||
| 1246 | - n = *ctl->sqtail - *ctl->sqhead; | ||
| 1247 | - do | ||
| 1248 | - rc = uv__io_uring_enter(ctl->ringfd, n, n, UV__IORING_ENTER_GETEVENTS); | ||
| 1249 | - while (rc == -1 && errno == EINTR); | ||
| 1250 | - | ||
| 1251 | - if (rc < 0) | ||
| 1252 | - perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */ | ||
| 1253 | - | ||
| 1254 | - if (rc != (int) n) | ||
| 1255 | - abort(); | ||
| 1256 | - | ||
| 1257 | - assert(*ctl->sqhead == *ctl->sqtail); | ||
| 1258 | - | ||
| 1259 | - memcpy(oldevents, *events, sizeof(*events)); | ||
| 1260 | - | ||
| 1261 | - /* Failed submissions are either EPOLL_CTL_DEL commands for file descriptors | ||
| 1262 | - * that have been closed, or EPOLL_CTL_ADD commands for file descriptors | ||
| 1263 | - * that we are already watching. Ignore the former and retry the latter | ||
| 1264 | - * with EPOLL_CTL_MOD. | ||
| 1265 | - */ | ||
| 1266 | - while (*ctl->cqhead != *ctl->cqtail) { | ||
| 1267 | - slot = (*ctl->cqhead)++ & ctl->cqmask; | ||
| 1268 | - | ||
| 1269 | - cqe = ctl->cqe; | ||
| 1270 | - cqe = &cqe[slot]; | ||
| 1271 | - | ||
| 1272 | - if (cqe->res == 0) | ||
| 1273 | - continue; | ||
| 1274 | - | ||
| 1275 | - fd = cqe->user_data >> 32; | ||
| 1276 | - op = 3 & cqe->user_data; | ||
| 1277 | - oldslot = 255 & (cqe->user_data >> 2); | ||
| 1278 | - | ||
| 1279 | - if (op == EPOLL_CTL_DEL) | ||
| 1280 | - continue; | ||
| 1281 | - | ||
| 1282 | - if (op != EPOLL_CTL_ADD) | ||
| 1283 | - abort(); | ||
| 1284 | - | ||
| 1285 | - if (cqe->res != -EEXIST) | ||
| 1286 | - abort(); | ||
| 1287 | - | ||
| 1288 | - uv__epoll_ctl_prep(epollfd, | ||
| 1289 | - ctl, | ||
| 1290 | - events, | ||
| 1291 | - EPOLL_CTL_MOD, | ||
| 1292 | - fd, | ||
| 1293 | - &oldevents[oldslot]); | ||
| 1294 | - } | ||
| 1295 | -} | ||
| 1296 | - | ||
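
uv__epoll_ctl_flush() recovers each failed command from the 64-bit user_data that uv__epoll_ctl_prep() packed as op | slot << 2 | fd << 32; the earlier STATIC_ASSERTs that the EPOLL_CTL_* constants are below 4 are what make the two-bit op field safe, and slot fits in eight bits for the 256-entry batch. A round-trip sketch of that packing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int op = 1;            /* e.g. EPOLL_CTL_ADD; the asserts guarantee op < 4 */
      uint32_t slot = 200;   /* < 256 for the 256-entry batch */
      int fd = 1234;

      uint64_t user_data = (uint64_t) op
                         | (uint64_t) slot << 2
                         | (uint64_t) fd << 32;

      int op2 = 3 & (int) user_data;
      uint32_t slot2 = 255 & (uint32_t) (user_data >> 2);
      int fd2 = (int) (user_data >> 32);

      printf("op=%d slot=%u fd=%d\n", op2, slot2, fd2);   /* op=1 slot=200 fd=1234 */
      return 0;
    }
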
| 1297 | - | ||
| 1298 | void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1299 | - uv__loop_internal_fields_t* lfields; | ||
| 1300 | + /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes | ||
| 1301 | + * effectively infinite on 32-bit architectures. To avoid blocking | ||
| 1302 | + * indefinitely, we cap the timeout and poll again if necessary. | ||
| 1303 | + * | ||
| 1304 | + * Note that "30 minutes" is a simplification because it depends on | ||
| 1305 | + * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200, | ||
| 1306 | + * that being the largest value I have seen in the wild (and only once). | ||
| 1307 | + */ | ||
| 1308 | + static const int max_safe_timeout = 1789569; | ||
| 1309 | struct epoll_event events[1024]; | ||
| 1310 | - struct epoll_event prep[256]; | ||
| 1311 | - struct uv__invalidate inv; | ||
| 1312 | struct epoll_event* pe; | ||
| 1313 | struct epoll_event e; | ||
| 1314 | - struct uv__iou* ctl; | ||
| 1315 | - struct uv__iou* iou; | ||
| 1316 | int real_timeout; | ||
| 1317 | QUEUE* q; | ||
| 1318 | uv__io_t* w; | ||
| 1319 | sigset_t* sigmask; | ||
| 1320 | sigset_t sigset; | ||
| 1321 | uint64_t base; | ||
| 1322 | - int have_iou_events; | ||
| 1323 | int have_signals; | ||
| 1324 | int nevents; | ||
| 1325 | - int epollfd; | ||
| 1326 | int count; | ||
| 1327 | int nfds; | ||
| 1328 | int fd; | ||
| 1329 | @@ -1109,9 +327,47 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1330 | int user_timeout; | ||
| 1331 | int reset_timeout; | ||
| 1332 | |||
| 1333 | - lfields = uv__get_internal_fields(loop); | ||
| 1334 | - ctl = &lfields->ctl; | ||
| 1335 | - iou = &lfields->iou; | ||
| 1336 | + if (loop->nfds == 0) { | ||
| 1337 | + assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||
| 1338 | + return; | ||
| 1339 | + } | ||
| 1340 | + | ||
| 1341 | + memset(&e, 0, sizeof(e)); | ||
| 1342 | + | ||
| 1343 | + while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1344 | + q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 1345 | + QUEUE_REMOVE(q); | ||
| 1346 | + QUEUE_INIT(q); | ||
| 1347 | + | ||
| 1348 | + w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 1349 | + assert(w->pevents != 0); | ||
| 1350 | + assert(w->fd >= 0); | ||
| 1351 | + assert(w->fd < (int) loop->nwatchers); | ||
| 1352 | + | ||
| 1353 | + e.events = w->pevents; | ||
| 1354 | + e.data.fd = w->fd; | ||
| 1355 | + | ||
| 1356 | + if (w->events == 0) | ||
| 1357 | + op = EPOLL_CTL_ADD; | ||
| 1358 | + else | ||
| 1359 | + op = EPOLL_CTL_MOD; | ||
| 1360 | + | ||
| 1361 | + /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching | ||
| 1362 | + * events, skip the syscall and squelch the events after epoll_wait(). | ||
| 1363 | + */ | ||
| 1364 | + if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) { | ||
| 1365 | + if (errno != EEXIST) | ||
| 1366 | + abort(); | ||
| 1367 | + | ||
| 1368 | + assert(op == EPOLL_CTL_ADD); | ||
| 1369 | + | ||
| 1370 | + /* We've reactivated a file descriptor that's been watched before. */ | ||
| 1371 | + if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e)) | ||
| 1372 | + abort(); | ||
| 1373 | + } | ||
| 1374 | + | ||
| 1375 | + w->events = w->pevents; | ||
| 1376 | + } | ||
| 1377 | |||
| 1378 | sigmask = NULL; | ||
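
The restored registration loop uses a common epoll idiom: try EPOLL_CTL_ADD first and fall back to EPOLL_CTL_MOD on EEXIST, which covers a file descriptor that was watched before. A minimal standalone sketch (stdin as the example fd; pipes and ttys poll fine, regular files do not):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    int main(void) {
      struct epoll_event e;
      int epfd;
      int fd = 0;   /* stdin, just for illustration */

      epfd = epoll_create1(EPOLL_CLOEXEC);
      if (epfd == -1)
        return 1;

      memset(&e, 0, sizeof(e));
      e.events = EPOLLIN;
      e.data.fd = fd;

      /* ADD first; EEXIST means the fd was watched before, so MOD instead. */
      if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &e)) {
        if (errno != EEXIST)
          return 1;
        if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &e))
          return 1;
      }

      puts("registered");
      close(epfd);
      return 0;
    }
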
| 1379 | if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { | ||
| 1380 | @@ -1125,7 +381,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1381 | count = 48; /* Benchmarks suggest this gives the best throughput. */ | ||
| 1382 | real_timeout = timeout; | ||
| 1383 | |||
| 1384 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 1385 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 1386 | reset_timeout = 1; | ||
| 1387 | user_timeout = timeout; | ||
| 1388 | timeout = 0; | ||
| 1389 | @@ -1134,56 +390,24 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1390 | user_timeout = 0; | ||
| 1391 | } | ||
| 1392 | |||
| 1393 | - epollfd = loop->backend_fd; | ||
| 1394 | - | ||
| 1395 | - memset(&e, 0, sizeof(e)); | ||
| 1396 | - | ||
| 1397 | - while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1398 | - q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 1399 | - w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||
| 1400 | - QUEUE_REMOVE(q); | ||
| 1401 | - QUEUE_INIT(q); | ||
| 1402 | - | ||
| 1403 | - op = EPOLL_CTL_MOD; | ||
| 1404 | - if (w->events == 0) | ||
| 1405 | - op = EPOLL_CTL_ADD; | ||
| 1406 | - | ||
| 1407 | - w->events = w->pevents; | ||
| 1408 | - e.events = w->pevents; | ||
| 1409 | - e.data.fd = w->fd; | ||
| 1410 | - | ||
| 1411 | - uv__epoll_ctl_prep(epollfd, ctl, &prep, op, w->fd, &e); | ||
| 1412 | - } | ||
| 1413 | - | ||
| 1414 | - inv.events = events; | ||
| 1415 | - inv.prep = &prep; | ||
| 1416 | - inv.nfds = -1; | ||
| 1417 | - | ||
| 1418 | for (;;) { | ||
| 1419 | - if (loop->nfds == 0) | ||
| 1420 | - if (iou->in_flight == 0) | ||
| 1421 | - break; | ||
| 1422 | - | ||
| 1423 | - /* All event mask mutations should be visible to the kernel before | ||
| 1424 | - * we enter epoll_pwait(). | ||
| 1425 | - */ | ||
| 1426 | - if (ctl->ringfd != -1) | ||
| 1427 | - while (*ctl->sqhead != *ctl->sqtail) | ||
| 1428 | - uv__epoll_ctl_flush(epollfd, ctl, &prep); | ||
| 1429 | - | ||
| 1430 | /* Only need to set the provider_entry_time if timeout != 0. The function | ||
| 1431 | * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. | ||
| 1432 | */ | ||
| 1433 | if (timeout != 0) | ||
| 1434 | uv__metrics_set_provider_entry_time(loop); | ||
| 1435 | |||
| 1436 | - /* Store the current timeout in a location that's globally accessible so | ||
| 1437 | - * other locations like uv__work_done() can determine whether the queue | ||
| 1438 | - * of events in the callback were waiting when poll was called. | ||
| 1439 | + /* See the comment for max_safe_timeout for an explanation of why | ||
| 1440 | + * this is necessary. Executive summary: kernel bug workaround. | ||
| 1441 | */ | ||
| 1442 | - lfields->current_timeout = timeout; | ||
| 1443 | + if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) | ||
| 1444 | + timeout = max_safe_timeout; | ||
| 1445 | |||
| 1446 | - nfds = epoll_pwait(epollfd, events, ARRAY_SIZE(events), timeout, sigmask); | ||
| 1447 | + nfds = epoll_pwait(loop->backend_fd, | ||
| 1448 | + events, | ||
| 1449 | + ARRAY_SIZE(events), | ||
| 1450 | + timeout, | ||
| 1451 | + sigmask); | ||
| 1452 | |||
| 1453 | /* Update loop->time unconditionally. It's tempting to skip the update when | ||
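
One plausible derivation of the restored max_safe_timeout constant: the affected kernels converted the millisecond timeout to jiffies through a 32-bit intermediate, so the cap keeps timeout * HZ below INT32_MAX at the largest CONFIG_HZ the comment cites. The arithmetic, as a check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int hz = 1200;              /* largest CONFIG_HZ observed in the wild */
      int cap = INT32_MAX / hz;   /* 1789569, the constant above */
      printf("cap = %d ms (~%.1f minutes)\n", cap, cap / 60000.0);
      return 0;
    }
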
| 1454 | * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the | ||
| 1455 | @@ -1203,7 +427,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1456 | continue; | ||
| 1457 | |||
| 1458 | if (timeout == 0) | ||
| 1459 | - break; | ||
| 1460 | + return; | ||
| 1461 | |||
| 1462 | /* We may have been inside the system call for longer than |timeout| | ||
| 1463 | * milliseconds so we need to update the timestamp to avoid drift. | ||
| 1464 | @@ -1224,18 +448,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1465 | continue; | ||
| 1466 | |||
| 1467 | if (timeout == 0) | ||
| 1468 | - break; | ||
| 1469 | + return; | ||
| 1470 | |||
| 1471 | /* Interrupted by a signal. Update timeout and poll again. */ | ||
| 1472 | goto update_timeout; | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | - have_iou_events = 0; | ||
| 1476 | have_signals = 0; | ||
| 1477 | nevents = 0; | ||
| 1478 | |||
| 1479 | - inv.nfds = nfds; | ||
| 1480 | - lfields->inv = &inv; | ||
| 1481 | + { | ||
| 1482 | + /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */ | ||
| 1483 | + union { | ||
| 1484 | + struct epoll_event* events; | ||
| 1485 | + uv__io_t* watchers; | ||
| 1486 | + } x; | ||
| 1487 | + | ||
| 1488 | + x.events = events; | ||
| 1489 | + assert(loop->watchers != NULL); | ||
| 1490 | + loop->watchers[loop->nwatchers] = x.watchers; | ||
| 1491 | + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; | ||
| 1492 | + } | ||
| 1493 | |||
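
The union in the restored block exists only to launder the pointer: with gcc >= 9, a direct cast between the two pointer types can trigger -Waddress-of-packed-member. An isolated illustration with hypothetical struct names; note the value is only stored and compared, never dereferenced through the wrong type:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two incompatible element types. */
    struct event   { int data; };
    struct watcher { void (*cb)(void); };

    int main(void) {
      struct event events[4];
      struct watcher* stash;

      /* Go through a union instead of casting directly. */
      union {
        struct event* events;
        struct watcher* watchers;
      } x;

      x.events = events;
      stash = x.watchers;

      printf("%p == %p\n", (void*) events, (void*) stash);
      return 0;
    }
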
| 1494 | for (i = 0; i < nfds; i++) { | ||
| 1495 | pe = events + i; | ||
| 1496 | @@ -1245,12 +478,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1497 | if (fd == -1) | ||
| 1498 | continue; | ||
| 1499 | |||
| 1500 | - if (fd == iou->ringfd) { | ||
| 1501 | - uv__poll_io_uring(loop, iou); | ||
| 1502 | - have_iou_events = 1; | ||
| 1503 | - continue; | ||
| 1504 | - } | ||
| 1505 | - | ||
| 1506 | assert(fd >= 0); | ||
| 1507 | assert((unsigned) fd < loop->nwatchers); | ||
| 1508 | |||
| 1509 | @@ -1262,7 +489,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1510 | * Ignore all errors because we may be racing with another thread | ||
| 1511 | * when the file descriptor is closed. | ||
| 1512 | */ | ||
| 1513 | - uv__epoll_ctl_prep(epollfd, ctl, &prep, EPOLL_CTL_DEL, fd, pe); | ||
| 1514 | + epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe); | ||
| 1515 | continue; | ||
| 1516 | } | ||
| 1517 | |||
| 1518 | @@ -1319,13 +546,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1519 | loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | - lfields->inv = NULL; | ||
| 1523 | - | ||
| 1524 | - if (have_iou_events != 0) | ||
| 1525 | - break; /* Event loop should cycle now so don't poll again. */ | ||
| 1526 | + loop->watchers[loop->nwatchers] = NULL; | ||
| 1527 | + loop->watchers[loop->nwatchers + 1] = NULL; | ||
| 1528 | |||
| 1529 | if (have_signals != 0) | ||
| 1530 | - break; /* Event loop should cycle now so don't poll again. */ | ||
| 1531 | + return; /* Event loop should cycle now so don't poll again. */ | ||
| 1532 | |||
| 1533 | if (nevents != 0) { | ||
| 1534 | if (nfds == ARRAY_SIZE(events) && --count != 0) { | ||
| 1535 | @@ -1333,11 +558,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1536 | timeout = 0; | ||
| 1537 | continue; | ||
| 1538 | } | ||
| 1539 | - break; | ||
| 1540 | + return; | ||
| 1541 | } | ||
| 1542 | |||
| 1543 | if (timeout == 0) | ||
| 1544 | - break; | ||
| 1545 | + return; | ||
| 1546 | |||
| 1547 | if (timeout == -1) | ||
| 1548 | continue; | ||
| 1549 | @@ -1347,14 +572,10 @@ update_timeout: | ||
| 1550 | |||
| 1551 | real_timeout -= (loop->time - base); | ||
| 1552 | if (real_timeout <= 0) | ||
| 1553 | - break; | ||
| 1554 | + return; | ||
| 1555 | |||
| 1556 | timeout = real_timeout; | ||
| 1557 | } | ||
| 1558 | - | ||
| 1559 | - if (ctl->ringfd != -1) | ||
| 1560 | - while (*ctl->sqhead != *ctl->sqtail) | ||
| 1561 | - uv__epoll_ctl_flush(epollfd, ctl, &prep); | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | uint64_t uv__hrtime(uv_clocktype_t type) { | ||
| 1565 | @@ -1867,7 +1088,7 @@ static uint64_t uv__read_uint64(const char* filename) { | ||
| 1566 | if (0 == uv__slurp(filename, buf, sizeof(buf))) | ||
| 1567 | if (1 != sscanf(buf, "%" PRIu64, &rc)) | ||
| 1568 | if (0 == strcmp(buf, "max\n")) | ||
| 1569 | - rc = UINT64_MAX; | ||
| 1570 | + rc = ~0ull; | ||
| 1571 | |||
| 1572 | return rc; | ||
| 1573 | } | ||
| 1574 | @@ -1903,7 +1124,6 @@ static void uv__get_cgroup1_memory_limits(char buf[static 1024], uint64_t* high, | ||
| 1575 | char filename[4097]; | ||
| 1576 | char* p; | ||
| 1577 | int n; | ||
| 1578 | - uint64_t cgroup1_max; | ||
| 1579 | |||
| 1580 | /* Find out where the controller is mounted. */ | ||
| 1581 | p = uv__cgroup1_find_memory_controller(buf, &n); | ||
| 1582 | @@ -1920,22 +1140,12 @@ static void uv__get_cgroup1_memory_limits(char buf[static 1024], uint64_t* high, | ||
| 1583 | * as indicated by uv__read_uint64 returning 0. | ||
| 1584 | */ | ||
| 1585 | if (*high != 0 && *max != 0) | ||
| 1586 | - goto update_limits; | ||
| 1587 | + return; | ||
| 1588 | } | ||
| 1589 | |||
| 1590 | /* Fall back to the limits of the global memory controller. */ | ||
| 1591 | *high = uv__read_uint64("/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"); | ||
| 1592 | *max = uv__read_uint64("/sys/fs/cgroup/memory/memory.limit_in_bytes"); | ||
| 1593 | - | ||
| 1594 | - /* uv__read_uint64 detects cgroup2's "max", so we need to separately detect | ||
| 1595 | - * cgroup1's maximum value (which is derived from LONG_MAX and PAGE_SIZE). | ||
| 1596 | - */ | ||
| 1597 | -update_limits: | ||
| 1598 | - cgroup1_max = LONG_MAX & ~(sysconf(_SC_PAGESIZE) - 1); | ||
| 1599 | - if (*high == cgroup1_max) | ||
| 1600 | - *high = UINT64_MAX; | ||
| 1601 | - if (*max == cgroup1_max) | ||
| 1602 | - *max = UINT64_MAX; | ||
| 1603 | } | ||
| 1604 | |||
| 1605 | static void uv__get_cgroup2_memory_limits(char buf[static 1024], uint64_t* high, | ||
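
Context for the cgroup1 hunk above: uv__read_uint64() maps cgroup2's literal "max" to UINT64_MAX, and the normalization being removed did the same for cgroup1's numeric maximum, which is LONG_MAX rounded down to a page boundary. A combined sketch of both "unlimited" spellings, assuming an LP64 platform with 4 KiB pages:

    #include <inttypes.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* cgroup2 writes the literal "max" for unlimited; cgroup1 writes
     * LONG_MAX rounded down to a page boundary. Map both to UINT64_MAX. */
    static uint64_t parse_limit(const char* buf) {
      uint64_t rc = 0;
      uint64_t cgroup1_max;

      if (1 != sscanf(buf, "%" SCNu64, &rc) && 0 == strcmp(buf, "max\n"))
        rc = UINT64_MAX;

      cgroup1_max = LONG_MAX & ~(uint64_t) (sysconf(_SC_PAGESIZE) - 1);
      if (rc == cgroup1_max)
        rc = UINT64_MAX;

      return rc;
    }

    int main(void) {
      printf("%" PRIu64 "\n", parse_limit("max\n"));                  /* UINT64_MAX */
      printf("%" PRIu64 "\n", parse_limit("9223372036854771712\n"));  /* UINT64_MAX */
      return 0;
    }
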
| 1606 | diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c | ||
| 1607 | index a87c2d77faf..3954b2c2753 100644 | ||
| 1608 | --- a/deps/uv/src/unix/os390.c | ||
| 1609 | +++ b/deps/uv/src/unix/os390.c | ||
| 1610 | @@ -808,7 +808,6 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) { | ||
| 1611 | |||
| 1612 | void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1613 | static const int max_safe_timeout = 1789569; | ||
| 1614 | - uv__loop_internal_fields_t* lfields; | ||
| 1615 | struct epoll_event events[1024]; | ||
| 1616 | struct epoll_event* pe; | ||
| 1617 | struct epoll_event e; | ||
| 1618 | @@ -831,8 +830,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1619 | return; | ||
| 1620 | } | ||
| 1621 | |||
| 1622 | - lfields = uv__get_internal_fields(loop); | ||
| 1623 | - | ||
| 1624 | while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1625 | uv_stream_t* stream; | ||
| 1626 | |||
| 1627 | @@ -880,7 +877,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1628 | int nevents = 0; | ||
| 1629 | have_signals = 0; | ||
| 1630 | |||
| 1631 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 1632 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 1633 | reset_timeout = 1; | ||
| 1634 | user_timeout = timeout; | ||
| 1635 | timeout = 0; | ||
| 1636 | @@ -899,12 +896,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1637 | if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) | ||
| 1638 | timeout = max_safe_timeout; | ||
| 1639 | |||
| 1640 | - /* Store the current timeout in a location that's globally accessible so | ||
| 1641 | - * other locations like uv__work_done() can determine whether the queue | ||
| 1642 | - * of events in the callback were waiting when poll was called. | ||
| 1643 | - */ | ||
| 1644 | - lfields->current_timeout = timeout; | ||
| 1645 | - | ||
| 1646 | nfds = epoll_wait(loop->ep, events, | ||
| 1647 | ARRAY_SIZE(events), timeout); | ||
| 1648 | |||
| 1649 | diff --git a/deps/uv/src/unix/posix-poll.c b/deps/uv/src/unix/posix-poll.c | ||
| 1650 | index 7e7de86845d..711780ece8d 100644 | ||
| 1651 | --- a/deps/uv/src/unix/posix-poll.c | ||
| 1652 | +++ b/deps/uv/src/unix/posix-poll.c | ||
| 1653 | @@ -132,7 +132,6 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) { | ||
| 1654 | |||
| 1655 | |||
| 1656 | void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1657 | - uv__loop_internal_fields_t* lfields; | ||
| 1658 | sigset_t* pset; | ||
| 1659 | sigset_t set; | ||
| 1660 | uint64_t time_base; | ||
| 1661 | @@ -153,8 +152,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1662 | return; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | - lfields = uv__get_internal_fields(loop); | ||
| 1666 | - | ||
| 1667 | /* Take queued watchers and add their fds to our poll fds array. */ | ||
| 1668 | while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||
| 1669 | q = QUEUE_HEAD(&loop->watcher_queue); | ||
| 1670 | @@ -182,7 +179,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1671 | assert(timeout >= -1); | ||
| 1672 | time_base = loop->time; | ||
| 1673 | |||
| 1674 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 1675 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 1676 | reset_timeout = 1; | ||
| 1677 | user_timeout = timeout; | ||
| 1678 | timeout = 0; | ||
| 1679 | @@ -201,12 +198,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||
| 1680 | if (timeout != 0) | ||
| 1681 | uv__metrics_set_provider_entry_time(loop); | ||
| 1682 | |||
| 1683 | - /* Store the current timeout in a location that's globally accessible so | ||
| 1684 | - * other locations like uv__work_done() can determine whether the queue | ||
| 1685 | - * of events in the callback were waiting when poll was called. | ||
| 1686 | - */ | ||
| 1687 | - lfields->current_timeout = timeout; | ||
| 1688 | - | ||
| 1689 | if (pset != NULL) | ||
| 1690 | if (pthread_sigmask(SIG_BLOCK, pset, NULL)) | ||
| 1691 | abort(); | ||
| 1692 | diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h | ||
| 1693 | index decde5362c8..2720121addc 100644 | ||
| 1694 | --- a/deps/uv/src/uv-common.h | ||
| 1695 | +++ b/deps/uv/src/uv-common.h | ||
| 1696 | @@ -396,37 +396,9 @@ struct uv__loop_metrics_s { | ||
| 1697 | void uv__metrics_update_idle_time(uv_loop_t* loop); | ||
| 1698 | void uv__metrics_set_provider_entry_time(uv_loop_t* loop); | ||
| 1699 | |||
| 1700 | -#ifdef __linux__ | ||
| 1701 | -struct uv__iou { | ||
| 1702 | - uint32_t* sqhead; | ||
| 1703 | - uint32_t* sqtail; | ||
| 1704 | - uint32_t* sqarray; | ||
| 1705 | - uint32_t sqmask; | ||
| 1706 | - uint32_t* sqflags; | ||
| 1707 | - uint32_t* cqhead; | ||
| 1708 | - uint32_t* cqtail; | ||
| 1709 | - uint32_t cqmask; | ||
| 1710 | - void* sq; /* pointer to munmap() on event loop teardown */ | ||
| 1711 | - void* cqe; /* pointer to array of struct uv__io_uring_cqe */ | ||
| 1712 | - void* sqe; /* pointer to array of struct uv__io_uring_sqe */ | ||
| 1713 | - size_t sqlen; | ||
| 1714 | - size_t cqlen; | ||
| 1715 | - size_t maxlen; | ||
| 1716 | - size_t sqelen; | ||
| 1717 | - int ringfd; | ||
| 1718 | - uint32_t in_flight; | ||
| 1719 | -}; | ||
| 1720 | -#endif /* __linux__ */ | ||
| 1721 | - | ||
| 1722 | struct uv__loop_internal_fields_s { | ||
| 1723 | unsigned int flags; | ||
| 1724 | uv__loop_metrics_t loop_metrics; | ||
| 1725 | - int current_timeout; | ||
| 1726 | -#ifdef __linux__ | ||
| 1727 | - struct uv__iou ctl; | ||
| 1728 | - struct uv__iou iou; | ||
| 1729 | - void* inv; /* used by uv__platform_invalidate_fd() */ | ||
| 1730 | -#endif /* __linux__ */ | ||
| 1731 | }; | ||
| 1732 | |||
| 1733 | #endif /* UV_COMMON_H_ */ | ||
| 1734 | diff --git a/deps/uv/src/win/core.c b/deps/uv/src/win/core.c | ||
| 1735 | index 9a3be58849a..e4041ec86a6 100644 | ||
| 1736 | --- a/deps/uv/src/win/core.c | ||
| 1737 | +++ b/deps/uv/src/win/core.c | ||
| 1738 | @@ -424,7 +424,6 @@ int uv_backend_timeout(const uv_loop_t* loop) { | ||
| 1739 | |||
| 1740 | |||
| 1741 | static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { | ||
| 1742 | - uv__loop_internal_fields_t* lfields; | ||
| 1743 | DWORD bytes; | ||
| 1744 | ULONG_PTR key; | ||
| 1745 | OVERLAPPED* overlapped; | ||
| 1746 | @@ -434,10 +433,9 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { | ||
| 1747 | uint64_t user_timeout; | ||
| 1748 | int reset_timeout; | ||
| 1749 | |||
| 1750 | - lfields = uv__get_internal_fields(loop); | ||
| 1751 | timeout_time = loop->time + timeout; | ||
| 1752 | |||
| 1753 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 1754 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 1755 | reset_timeout = 1; | ||
| 1756 | user_timeout = timeout; | ||
| 1757 | timeout = 0; | ||
| 1758 | @@ -452,12 +450,6 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { | ||
| 1759 | if (timeout != 0) | ||
| 1760 | uv__metrics_set_provider_entry_time(loop); | ||
| 1761 | |||
| 1762 | - /* Store the current timeout in a location that's globally accessible so | ||
| 1763 | - * other locations like uv__work_done() can determine whether the queue | ||
| 1764 | - * of events in the callback were waiting when poll was called. | ||
| 1765 | - */ | ||
| 1766 | - lfields->current_timeout = timeout; | ||
| 1767 | - | ||
| 1768 | GetQueuedCompletionStatus(loop->iocp, | ||
| 1769 | &bytes, | ||
| 1770 | &key, | ||
| 1771 | @@ -515,7 +507,6 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { | ||
| 1772 | |||
| 1773 | |||
| 1774 | static void uv__poll(uv_loop_t* loop, DWORD timeout) { | ||
| 1775 | - uv__loop_internal_fields_t* lfields; | ||
| 1776 | BOOL success; | ||
| 1777 | uv_req_t* req; | ||
| 1778 | OVERLAPPED_ENTRY overlappeds[128]; | ||
| 1779 | @@ -527,10 +518,9 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { | ||
| 1780 | uint64_t actual_timeout; | ||
| 1781 | int reset_timeout; | ||
| 1782 | |||
| 1783 | - lfields = uv__get_internal_fields(loop); | ||
| 1784 | timeout_time = loop->time + timeout; | ||
| 1785 | |||
| 1786 | - if (lfields->flags & UV_METRICS_IDLE_TIME) { | ||
| 1787 | + if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { | ||
| 1788 | reset_timeout = 1; | ||
| 1789 | user_timeout = timeout; | ||
| 1790 | timeout = 0; | ||
| 1791 | @@ -547,12 +537,6 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { | ||
| 1792 | if (timeout != 0) | ||
| 1793 | uv__metrics_set_provider_entry_time(loop); | ||
| 1794 | |||
| 1795 | - /* Store the current timeout in a location that's globally accessible so | ||
| 1796 | - * other locations like uv__work_done() can determine whether the queue | ||
| 1797 | - * of events in the callback were waiting when poll was called. | ||
| 1798 | - */ | ||
| 1799 | - lfields->current_timeout = timeout; | ||
| 1800 | - | ||
| 1801 | success = pGetQueuedCompletionStatusEx(loop->iocp, | ||
| 1802 | overlappeds, | ||
| 1803 | ARRAY_SIZE(overlappeds), | ||
diff --git a/meta-oe/recipes-devtools/nodejs/nodejs_20.8.1.bb b/meta-oe/recipes-devtools/nodejs/nodejs_20.8.1.bb
index d02e733ba6..65f4eb3f3a 100644
--- a/meta-oe/recipes-devtools/nodejs/nodejs_20.8.1.bb
+++ b/meta-oe/recipes-devtools/nodejs/nodejs_20.8.1.bb
| @@ -24,6 +24,8 @@ SRC_URI = "http://nodejs.org/dist/v${PV}/node-v${PV}.tar.xz \ | |||
| 24 | file://0004-v8-don-t-override-ARM-CFLAGS.patch \ | 24 | file://0004-v8-don-t-override-ARM-CFLAGS.patch \ |
| 25 | file://system-c-ares.patch \ | 25 | file://system-c-ares.patch \ |
| 26 | file://0001-liftoff-Correct-function-signatures.patch \ | 26 | file://0001-liftoff-Correct-function-signatures.patch \ |
| 27 | file://0001-Revert-io_uring-changes-from-libuv-1.46.0.patch \ | ||
| 28 | file://0002-Revert-io_uring-changes-from-libuv-1.45.0.patch \ | ||
| 27 | file://run-ptest \ | 29 | file://run-ptest \ |
| 28 | " | 30 | " |
| 29 | 31 | ||
