diff options
author | normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2018-04-21 03:12:36 +0000 |
---|---|---|
committer | normal <normal@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2018-04-21 03:12:36 +0000 |
commit | 645f7fbd4ec0199c6266df19ad82d99bdd8553a8 (patch) | |
tree | 9bf8bba9938ce3c535bd59ef3f5db2f82fc1f421 | |
parent | af72dcd91b70e94b0f8299ab0464dafe884d2695 (diff) | |
download | ruby-645f7fbd4ec0199c6266df19ad82d99bdd8553a8.tar.gz |
io.c: do not use rb_notify_fd_close on recycled FD
It is unsafe to release GVL and call rb_notify_fd_close after
close(2) on any given FD. FDs (file descriptor) may be recycled
in other threads immediately after close() to point to a different
file description. Note the distinction between "file description"
and "file descriptor".
    th-1                         | th-2
    -----------------------------+---------------------------------------
    io_close_fptr                |
      rb_notify_fd_close(fd)     |
      fptr_finalize_flush        |
        close(fd)                |
      rb_thread_schedule         |
                                 | fd reused (via pipe/open/socket/etc)
      rb_notify_fd_close(fd)     |
                                 |   sees "stream closed" exception
                                 |   for DIFFERENT file description
* thread.c (rb_thread_io_blocking_region): adjust comment for list_del
* thread.c (rb_notify_fd_close): give busy list to caller
* thread.c (rb_thread_fd_close): loop on busy list
* io.c (io_close_fptr): do not call rb_thread_fd_close on invalid FD
* io.c (io_reopen): use rb_thread_fd_close
Fixes: r57422 ("io.c: close before wait")
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63216 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
-rw-r--r-- | io.c | 16 | ||||
-rw-r--r-- | thread.c | 28 |
2 files changed, 23 insertions, 21 deletions
--- a/io.c
+++ b/io.c
@@ -21,6 +21,7 @@
 #include <ctype.h>
 #include <errno.h>
 #include "ruby_atomic.h"
+#include "ccan/list/list.h"
 #undef free
 #define free(x) xfree(x)
@@ -4654,15 +4655,14 @@ rb_io_memsize(const rb_io_t *fptr)
 # define KEEPGVL FALSE
 #endif
 
-int rb_notify_fd_close(int fd);
+int rb_notify_fd_close(int fd, struct list_head *);
 static rb_io_t *
 io_close_fptr(VALUE io)
 {
     rb_io_t *fptr;
-    int fd;
     VALUE write_io;
     rb_io_t *write_fptr;
-    int busy;
+    LIST_HEAD(busy);
 
     write_io = GetWriteIO(io);
     if (io != write_io) {
@@ -4676,11 +4676,9 @@ io_close_fptr(VALUE io)
     if (!fptr) return 0;
     if (fptr->fd < 0) return 0;
 
-    fd = fptr->fd;
-    busy = rb_notify_fd_close(fd);
-    if (busy) {
-        fptr_finalize_flush(fptr, FALSE, KEEPGVL);
-        do rb_thread_schedule(); while (rb_notify_fd_close(fd));
+    if (rb_notify_fd_close(fptr->fd, &busy)) {
+        fptr_finalize_flush(fptr, FALSE, KEEPGVL); /* calls close(fptr->fd) */
+        do rb_thread_schedule(); while (!list_empty(&busy));
     }
     rb_io_fptr_cleanup(fptr, FALSE);
     return fptr;
@@ -7185,7 +7183,7 @@ io_reopen(VALUE io, VALUE nfile)
         rb_update_max_fd(fd);
         fptr->fd = fd;
     }
-    rb_notify_fd_close(fd);
+    rb_thread_fd_close(fd);
     if ((orig->mode & FMODE_READABLE) && pos >= 0) {
         if (io_seek(fptr, pos, SEEK_SET) < 0 && errno) {
             rb_sys_fail_path(fptr->pathv);
         }
--- a/thread.c
+++ b/thread.c
@@ -1527,7 +1527,10 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
     }
     EC_POP_TAG();
 
-    /* must be deleted before jump */
+    /*
+     * must be deleted before jump
+     * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
+     */
     list_del(&wfd.wfd_node);
 
     if (state) {
@@ -2260,35 +2263,36 @@ rb_ec_reset_raised(rb_execution_context_t *ec)
 }
 
 int
-rb_notify_fd_close(int fd)
+rb_notify_fd_close(int fd, struct list_head *busy)
 {
     rb_vm_t *vm = GET_THREAD()->vm;
     struct waiting_fd *wfd = 0;
-    int busy;
+    struct waiting_fd *next = 0;
 
-    busy = 0;
-    list_for_each(&vm->waiting_fds, wfd, wfd_node) {
+    list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
         if (wfd->fd == fd) {
             rb_thread_t *th = wfd->th;
             VALUE err;
 
-            busy = 1;
-            if (!th) {
-                continue;
-            }
-            wfd->th = 0;
+            list_del(&wfd->wfd_node);
+            list_add(busy, &wfd->wfd_node);
+
             err = th->vm->special_exceptions[ruby_error_stream_closed];
             rb_threadptr_pending_interrupt_enque(th, err);
             rb_threadptr_interrupt(th);
         }
     }
-    return busy;
+    return !list_empty(busy);
 }
 
 void
 rb_thread_fd_close(int fd)
 {
-    while (rb_notify_fd_close(fd)) rb_thread_schedule();
+    LIST_HEAD(busy);
+
+    if (rb_notify_fd_close(fd, &busy)) {
+        do rb_thread_schedule(); while (!list_empty(&busy));
+    }
 }