aboutsummaryrefslogtreecommitdiffstats
path: root/io.c
diff options
context:
space:
mode:
authorKJ Tsanaktsidis <kj@kjtsanaktsidis.id.au>2023-05-21 22:29:16 +1000
committerNobuyoshi Nakada <nobu@ruby-lang.org>2023-05-26 14:51:23 +0900
commit66871c5a06d723f8350935ced1e88d8cc929d809 (patch)
tree71cf77cf24923b1c10695a1b07e06742353cbfc8 /io.c
parent54a74c42033e42869e69e7dc9e67efa1faf225be (diff)
downloadruby-66871c5a06d723f8350935ced1e88d8cc929d809.tar.gz
Fix busy-loop when waiting for file descriptors to close
When one thread is closing a file descriptor whilst another thread is concurrently reading it, we need to wait for the reading thread to be done with it to prevent a potential EBADF (or, worse, file descriptor reuse). At the moment, that is done by keeping a list of threads still using the file descriptor in io_close_fptr. It then continually calls rb_thread_schedule() in fptr_finalize_flush until said list is empty. That busy-looping seems to behave rather poorly on some OS's, particularly FreeBSD. It can cause the TestIO#test_race_gets_and_close test to fail (even with its very long 200 second timeout) because the closing thread starves out the using thread. To fix that, I introduce the concept of struct rb_io_close_wait_list; a list of threads still using a file descriptor that we want to close. We call `rb_notify_fd_close` to let the thread scheduler know we're closing a FD, which fills the list with threads. Then, we call rb_notify_fd_close_wait which will block the thread until all of the still-using threads are done. This is implemented with a condition variable sleep, so no busy-looping is required.
Diffstat (limited to 'io.c')
-rw-r--r--io.c16
1 files changed, 11 insertions, 5 deletions
diff --git a/io.c b/io.c
index 13965c7610..fea1a9cbcd 100644
--- a/io.c
+++ b/io.c
@@ -5422,9 +5422,17 @@ maygvl_fclose(FILE *file, int keepgvl)
static void free_io_buffer(rb_io_buffer_t *buf);
static void clear_codeconv(rb_io_t *fptr);
+static void*
+call_close_wait_nogvl(void *arg)
+{
+ struct rb_io_close_wait_list *busy = (struct rb_io_close_wait_list *)arg;
+ rb_notify_fd_close_wait(busy);
+ return NULL;
+}
+
static void
fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
- struct ccan_list_head *busy)
+ struct rb_io_close_wait_list *busy)
{
VALUE error = Qnil;
int fd = fptr->fd;
@@ -5467,7 +5475,7 @@ fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
// Ensure waiting_fd users do not hit EBADF.
if (busy) {
// Wait for them to exit before we call close().
- do rb_thread_schedule(); while (!ccan_list_empty(busy));
+ (void)rb_thread_call_without_gvl(call_close_wait_nogvl, busy, RUBY_UBF_IO, 0);
}
// Disable for now.
@@ -5618,16 +5626,14 @@ rb_io_memsize(const rb_io_t *fptr)
# define KEEPGVL FALSE
#endif
-int rb_notify_fd_close(int fd, struct ccan_list_head *);
static rb_io_t *
io_close_fptr(VALUE io)
{
rb_io_t *fptr;
VALUE write_io;
rb_io_t *write_fptr;
- struct ccan_list_head busy;
+ struct rb_io_close_wait_list busy;
- ccan_list_head_init(&busy);
write_io = GetWriteIO(io);
if (io != write_io) {
write_fptr = RFILE(write_io)->fptr;