00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "ruby/ruby.h"
00013 #include "internal.h"
00014 #include "vm_core.h"
00015 #include "gc.h"
00016 #include "eval_intern.h"
00017
00018 #if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || (defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT))) && !defined(__NetBSD__) && !defined(sun) && !defined(FIBER_USE_NATIVE)
00019 #define FIBER_USE_NATIVE 1
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037 #elif !defined(FIBER_USE_NATIVE)
00038 #define FIBER_USE_NATIVE 0
00039 #endif
00040
00041 #if FIBER_USE_NATIVE
00042 #ifndef _WIN32
00043 #include <unistd.h>
00044 #include <sys/mman.h>
00045 #include <ucontext.h>
00046 #endif
00047 #define RB_PAGE_SIZE (pagesize)
00048 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
00049 static long pagesize;
00050 #define FIBER_MACHINE_STACK_ALLOCATION_SIZE (0x10000)
00051 #endif
00052
00053 #define CAPTURE_JUST_VALID_VM_STACK 1
00054
/* Kind of saved execution context: a full continuation, an ordinary
 * Fiber, or the implicit root fiber of a thread. */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};

/* Saved execution context shared by continuations and fibers.
 * Holds copies of the VM stack, the machine (C) stack, and a snapshot
 * of the owning rb_thread_t taken at capture time. */
typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;                 /* the wrapping Continuation/Fiber object */
    int argc;                   /* argc passed on switch; -1 => value is an exception */
    VALUE value;                /* value passed across the switch */
    VALUE *vm_stack;            /* heap copy of the Ruby VM stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;       /* length of the value-stack portion (from th->stack) */
    size_t vm_stack_clen;       /* length of the control-frame portion (tail of th->stack) */
#endif
    VALUE *machine_stack;       /* heap copy of the C stack */
    VALUE *machine_stack_src;   /* original location the copy was taken from */
#ifdef __ia64
    VALUE *machine_register_stack;      /* ia64 register backing store copy */
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;   /* snapshot of the thread at capture time */
    rb_jmpbuf_t jmpbuf;         /* jump target used to re-enter this context */
    size_t machine_stack_size;
} rb_context_t;

enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};

#if FIBER_USE_NATIVE && !defined(_WIN32)
/* Small cache of mmap'ed machine stacks from terminated fibers so new
 * fibers can reuse them instead of re-mmapping.
 * NOTE(review): "MAHINE" is a long-standing typo for "MACHINE"; kept
 * because the name is referenced elsewhere in this file. */
#define MAX_MAHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;
    size_t size;
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
/* stack of the most recently terminated fiber, pending recycle/unmap */
static machine_stack_cache_t terminated_machine_stack;
#endif

/* A fiber: a context plus resume linkage and liveness status.
 * All fibers of a process are kept on a circular doubly linked list
 * (prev_fiber/next_fiber). */
typedef struct rb_fiber_struct {
    rb_context_t cont;          /* must be first: fiber is cast to rb_context_t */
    VALUE prev;                 /* fiber to return to on Fiber.yield (Qnil if none) */
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber;
    struct rb_fiber_struct *next_fiber;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;           /* Win32 fiber handle */
#else
    ucontext_t context;         /* POSIX ucontext used for native switching */
#endif
#endif
} rb_fiber_t;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Unwrap a Continuation object to its rb_context_t. */
#define GetContPtr(obj, ptr) \
    TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Unwrap a Fiber object; raises FiberError if not yet initialized. */
#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
    if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while(0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

/* A thread with no tag has no active execution to capture/switch. */
#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
00133
00134 static void
00135 cont_mark(void *ptr)
00136 {
00137 RUBY_MARK_ENTER("cont");
00138 if (ptr) {
00139 rb_context_t *cont = ptr;
00140 rb_gc_mark(cont->value);
00141 rb_thread_mark(&cont->saved_thread);
00142 rb_gc_mark(cont->saved_thread.self);
00143
00144 if (cont->vm_stack) {
00145 #ifdef CAPTURE_JUST_VALID_VM_STACK
00146 rb_gc_mark_locations(cont->vm_stack,
00147 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
00148 #else
00149 rb_gc_mark_localtion(cont->vm_stack,
00150 cont->vm_stack, cont->saved_thread.stack_size);
00151 #endif
00152 }
00153
00154 if (cont->machine_stack) {
00155 if (cont->type == CONTINUATION_CONTEXT) {
00156
00157 rb_gc_mark_locations(cont->machine_stack,
00158 cont->machine_stack + cont->machine_stack_size);
00159 }
00160 else {
00161
00162 rb_thread_t *th;
00163 rb_fiber_t *fib = (rb_fiber_t*)cont;
00164 GetThreadPtr(cont->saved_thread.self, th);
00165 if ((th->fiber != cont->self) && fib->status == RUNNING) {
00166 rb_gc_mark_locations(cont->machine_stack,
00167 cont->machine_stack + cont->machine_stack_size);
00168 }
00169 }
00170 }
00171 #ifdef __ia64
00172 if (cont->machine_register_stack) {
00173 rb_gc_mark_locations(cont->machine_register_stack,
00174 cont->machine_register_stack + cont->machine_register_stack_size);
00175 }
00176 #endif
00177 }
00178 RUBY_MARK_LEAVE("cont");
00179 }
00180
00181 static void
00182 cont_free(void *ptr)
00183 {
00184 RUBY_FREE_ENTER("cont");
00185 if (ptr) {
00186 rb_context_t *cont = ptr;
00187 RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
00188 #if FIBER_USE_NATIVE
00189 if (cont->type == CONTINUATION_CONTEXT) {
00190
00191 RUBY_FREE_UNLESS_NULL(cont->machine_stack);
00192 }
00193 else {
00194
00195 #ifdef _WIN32
00196 if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
00197
00198 rb_fiber_t *fib = (rb_fiber_t*)cont;
00199 if (fib->fib_handle) {
00200 DeleteFiber(fib->fib_handle);
00201 }
00202 }
00203 #else
00204 if (GET_THREAD()->fiber != cont->self) {
00205 rb_fiber_t *fib = (rb_fiber_t*)cont;
00206 if (fib->context.uc_stack.ss_sp) {
00207 if (cont->type == ROOT_FIBER_CONTEXT) {
00208 rb_bug("Illegal root fiber parameter");
00209 }
00210 munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
00211 }
00212 }
00213 else {
00214
00215
00216
00217 }
00218 #endif
00219 }
00220 #else
00221 RUBY_FREE_UNLESS_NULL(cont->machine_stack);
00222 #endif
00223 #ifdef __ia64
00224 RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
00225 #endif
00226 RUBY_FREE_UNLESS_NULL(cont->vm_stack);
00227
00228
00229 ruby_xfree(ptr);
00230 }
00231 RUBY_FREE_LEAVE("cont");
00232 }
00233
00234 static size_t
00235 cont_memsize(const void *ptr)
00236 {
00237 const rb_context_t *cont = ptr;
00238 size_t size = 0;
00239 if (cont) {
00240 size = sizeof(*cont);
00241 if (cont->vm_stack) {
00242 #ifdef CAPTURE_JUST_VALID_VM_STACK
00243 size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
00244 #else
00245 size_t n = cont->saved_thread.stack_size;
00246 #endif
00247 size += n * sizeof(*cont->vm_stack);
00248 }
00249
00250 if (cont->machine_stack) {
00251 size += cont->machine_stack_size * sizeof(*cont->machine_stack);
00252 }
00253 #ifdef __ia64
00254 if (cont->machine_register_stack) {
00255 size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
00256 }
00257 #endif
00258 }
00259 return size;
00260 }
00261
00262 static void
00263 fiber_mark(void *ptr)
00264 {
00265 RUBY_MARK_ENTER("cont");
00266 if (ptr) {
00267 rb_fiber_t *fib = ptr;
00268 rb_gc_mark(fib->prev);
00269 cont_mark(&fib->cont);
00270 }
00271 RUBY_MARK_LEAVE("cont");
00272 }
00273
/* Insert +fib+ into the global circular fiber list, immediately after
 * the currently running fiber. */
static void
fiber_link_join(rb_fiber_t *fib)
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* join fiber (splice between current and current->next) */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}
00287
00288 static void
00289 fiber_link_remove(rb_fiber_t *fib)
00290 {
00291 fib->prev_fiber->next_fiber = fib->next_fiber;
00292 fib->next_fiber->prev_fiber = fib->prev_fiber;
00293 }
00294
/* GC free function for a fiber: releases the fiber-local storage table
 * (non-root fibers only), removes the fiber from the global list, and
 * frees the embedded context. */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        /* root fiber shares the thread's local_storage; don't free it here */
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
00311
00312 static size_t
00313 fiber_memsize(const void *ptr)
00314 {
00315 const rb_fiber_t *fib = ptr;
00316 size_t size = 0;
00317 if (ptr) {
00318 size = sizeof(*fib);
00319 if (fib->cont.type != ROOT_FIBER_CONTEXT) {
00320 size += st_memsize(fib->cont.saved_thread.local_storage);
00321 }
00322 size += cont_memsize(&fib->cont);
00323 }
00324 return size;
00325 }
00326
00327 VALUE
00328 rb_obj_is_fiber(VALUE obj)
00329 {
00330 if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
00331 return Qtrue;
00332 }
00333 else {
00334 return Qfalse;
00335 }
00336 }
00337
/* Copy the live C (machine) stack of +th+ into cont->machine_stack,
 * (re)allocating the buffer as needed.  Handles both stack growth
 * directions; on ia64 the register backing store is captured as well. */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        /* stack grows downward: end is the low address */
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        /* stack grows upward */
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    /* make sure register windows are on the stack before copying (SPARC) */
    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}
00381
/* TypedData hooks for Continuation objects. */
static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize,},
};
00386
/* Snapshot the whole rb_thread_t into cont->saved_thread.  The machine
 * stack boundary fields are cleared: they describe the live C stack and
 * must not be carried into the saved copy. */
static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;

    /* saved_thread->machine_stack_(start|end) should be NULL */
    cont->saved_thread.machine_stack_start = 0;
    cont->saved_thread.machine_stack_end = 0;
#ifdef __ia64
    cont->saved_thread.machine_register_stack_start = 0;
    cont->saved_thread.machine_register_stack_end = 0;
#endif
}
00401
/* Initialize a freshly allocated context from the current thread.
 * local_storage is cleared so the context does not alias the thread's
 * fiber-local table. */
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}
00409
/* Allocate and initialize a new Continuation wrapping the current
 * thread's state.  Returns the C struct; the Ruby object is reachable
 * via cont->self. */
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
00423
/* Capture the current execution state into a new Continuation.
 * Returns twice: the first (direct) return sets *stat = 0 and yields
 * the continuation object; when the continuation is later invoked,
 * control comes back through the setjmp with *stat = 1 and the value
 * passed to the call. */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* copy only the live portions of the VM stack: the value stack
     * (from th->stack up to sp) and the control-frame area (from cfp
     * to the top of the stack region) */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* return from the continuation call */
        volatile VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);  /* value is an exception */
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* initial capture */
        *stat = 0;
        return cont->self;
    }
}
00465
/* Restore the thread state saved in +cont+ into the current thread.
 * For continuations the saved VM stack is copied back over the live
 * fiber's stack; for fibers the thread simply adopts the fiber's own
 * stack and local storage. */
static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation: restore onto the stack of the fiber that was
         * current when the continuation was captured */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        /* value stack at the bottom, control frames at the top */
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber: adopt the fiber's own VM stack and local storage */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
}
00511
00512 #if FIBER_USE_NATIVE
00513 #ifdef _WIN32
/* (Win32) Record the page-aligned base of the current fiber's machine
 * stack as the thread's machine_stack_start, so GC scans the right
 * region after a SwitchToFiber. */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    /* round to the page boundary on the stack-base side */
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
00523
/* (Win32) Entry point passed to CreateFiberEx: fix up the stack base
 * for GC and run the fiber body. */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
00530 #else
00531
00532
00533
00534
00535
00536
/* MAP_STACK is requested where available; it is deliberately avoided on
 * FreeBSD (see the guard below). */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

/* Obtain a machine stack of +size+ bytes for a new fiber: reuse a
 * cached stack from a terminated fiber when one is available,
 * otherwise mmap a fresh region and install a one-page guard at the
 * stack-growth end. */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
        /* all cached stacks are created with the same size, so any
         * mismatch indicates corruption */
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else{
            /* not canonicalized stack size */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        void *page;
        STACK_GROW_DIR_DETECTION;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
        if (ptr == MAP_FAILED) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
        }

        /* guard page: the last page in the growth direction is made
         * inaccessible so stack overflow faults instead of corrupting
         * adjacent memory */
        page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }

    return ptr;
}
00578 #endif
00579
/* Set up the native execution context (Win32 fiber or POSIX ucontext)
 * and machine stack for +fib+ before its first switch-in. */
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
        /* try to release unnecessary fibers & retry to create */
        rb_gc();
        fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
        if (!fib->fib_handle) {
            rb_raise(rb_eFiberError, "can't create fiber");
        }
    }
    sth->machine_stack_maxsize = size;
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    char *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    /* one page is reserved for the guard page */
    sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
00614
00615 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
00616
/* Native fiber switch: restore newfib's thread state, save oldfib's
 * machine-stack bounds for GC, and transfer control via the platform
 * primitive (SwitchToFiber / setcontext / swapcontext). */
static void
fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
{
    rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;

    if (newfib->status != RUNNING) {
        /* first switch-in: allocate its native context and stack */
        fiber_initialize_machine_stack_context(newfib, FIBER_MACHINE_STACK_ALLOCATION_SIZE);
    }

    /* restore thread context */
    cont_restore_thread(&newfib->cont);
    th->machine_stack_maxsize = sth->machine_stack_maxsize;
    if (sth->machine_stack_end && (newfib != oldfib)) {
        rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
    }

    /* save oldfib's machine stack bounds so GC can scan it while this
     * fiber is suspended */
    if (oldfib->status != TERMINATED) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->machine_stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
            oldfib->cont.machine_stack = th->machine_stack_end;
        }
        else {
            oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
            oldfib->cont.machine_stack = th->machine_stack_start;
        }
    }

    /* exchange machine_stack_start between oldfib and newfib */
    oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
    th->machine_stack_start = sth->machine_stack_start;
    /* oldfib->machine_stack_end should be NULL */
    oldfib->cont.saved_thread.machine_stack_end = 0;
#ifndef _WIN32
    if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
        rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
    }
#endif

    /* switch machine context */
#ifdef _WIN32
    SwitchToFiber(newfib->fib_handle);
#elif defined(__FreeBSD__)
    /* FreeBSD: swapcontext is avoided; a setjmp/longjmp pair resumes an
     * already-running fiber, setcontext starts a fresh one */
    if (!ruby_setjmp(oldfib->cont.jmpbuf)) {
        if (newfib->status != RUNNING) {
            if (setcontext(&newfib->context) < 0) {
                rb_bug("context switch between fiber failed");
            }
        }
        else {
            ruby_longjmp(newfib->cont.jmpbuf, 1);
        }
    }
#else
    swapcontext(&oldfib->context, &newfib->context);
#endif
}
00675 #endif
00676
00677 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
00678
/* Second stage of continuation restore: copy the saved machine stack
 * back over the live C stack and longjmp into the captured jmpbuf.
 * Must be entered with the current frame outside the region being
 * overwritten (cont_restore_0 guarantees that).  Never returns. */
static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* Win64: patch the saved jmpbuf's Frame with the current one so
         * the OS unwinder accepts the longjmp across stack replacement */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
00709
00710 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
00711
#ifdef __ia64
/* ia64 only: grow the register backing store (via recursion forcing
 * register spills) until it clears the region that will be overwritten,
 * then fall into cont_restore_0.  The C(x)/E(x) volatile dummies exist
 * solely to force enough registers live that recursion spills them. */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{}
#endif
/* never true at runtime; defeats optimization of the dummy stores */
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
00746
/* First stage of continuation restore: make sure the current C stack
 * frame lies outside the region that cont_restore_1() will overwrite
 * with the saved machine stack, by growing the stack via alloca or by
 * recursing with a pad array.  Handles both growth directions; never
 * returns (cont_restore_1 longjmps away). */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* runtime-detected: stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                /* the read of *sp keeps the alloca from being elided */
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* runtime-detected: stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
00795 #ifdef __ia64
00796 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp());
00797 #endif
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807
00808
00809
00810
00811
00812
00813
00814
00815
00816
00817
00818
00819
00820
00821
00822
00823
00824
00825
00826
00827
00828
00829
00830
00831
00832
00833
00834
00835
00836
00837
00838
00839
00840
00841
00842
00843
00844
00845
00846
00847
00848
00849
00850
00851
00852
00853
00854
00855
00856
00857
00858
00859
00860
00861
00862
00863
00864 static VALUE
00865 rb_callcc(VALUE self)
00866 {
00867 volatile int called;
00868 volatile VALUE val = cont_capture(&called);
00869
00870 if (called) {
00871 return val;
00872 }
00873 else {
00874 return rb_yield(val);
00875 }
00876 }
00877
00878 static VALUE
00879 make_passing_arg(int argc, VALUE *argv)
00880 {
00881 switch(argc) {
00882 case 0:
00883 return Qnil;
00884 case 1:
00885 return argv[0];
00886 default:
00887 return rb_ary_new4(argc, argv);
00888 }
00889 }
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
00901
00902
00903
00904
00905
00906
/* Continuation#call: validate that the continuation may be invoked
 * from here (same thread, same protect tag, same fiber), stash the
 * arguments, and jump back into the captured context.  Does not return
 * normally. */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        /* GetFiberPtr also validates the fiber is initialized */
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* jumps away; the return below is never executed */
    cont_restore_0(cont, &contval);
    return Qnil;
}
00935
00936
00937
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953
00954
00955
00956
00957
00958
00959
00960
00961
00962
00963
00964
00965
00966
00967
00968
00969
00970
00971
00972
00973
00974
00975
00976
00977
00978
00979
00980
00981
00982
00983
00984
00985
00986
00987
00988
00989
00990
00991
00992
00993
00994
00995
00996
00997
00998
00999
01000
01001
/* Size (in VALUE slots) of each fiber's own Ruby VM stack. */
#define FIBER_VM_STACK_SIZE (4 * 1024)

/* TypedData hooks for Fiber objects. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
};
01008
/* Allocate an empty Fiber wrapper; the rb_fiber_t is attached later by
 * fiber_t_alloc() (DATA_PTR starts as 0 = uninitialized). */
static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
01014
/* Allocate and zero an rb_fiber_t for +fibval+ and snapshot the
 * current thread into it.  Raises if the Fiber object was already
 * initialized. */
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fibval) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}
01038
/* Shared initializer for Fiber.new and rb_fiber_new(): allocates the
 * fiber's own VM stack and builds an initial dummy control frame so the
 * fiber can start executing +proc+ on first resume. */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* place a single dummy control frame at the top of the new stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    /* setjmp-based fibers start from a copy of the thread's root jmpbuf */
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
01082
01083
/* Fiber#initialize: the required block becomes the fiber body. */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}
01089
/* C API: create a Fiber whose body is the C function +func+ called
 * with +obj+. */
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}
01095
/* Return the fiber that Fiber.yield (or fiber termination) should
 * switch back to: the resuming fiber if one is recorded, else the root
 * fiber.  Raises FiberError when called from the root fiber itself. */
static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    GetFiberPtr(curr, fib);

    if (fib->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        /* clear prev so a later yield without resume falls back to root */
        VALUE prev = fib->prev;
        fib->prev = Qnil;
        return prev;
    }
}
01119
01120 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
01121
/* Finish a fiber: mark it TERMINATED, hand its machine stack to the
 * recycle slot (freed/cached by the next fiber switch, since it is
 * still in use right now), and transfer the final value to the
 * return fiber.  Does not return. */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to dead fiber's ss_sp, so it needs cleaning. */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
01137
/* Entry point executed when a fiber runs for the first time: invoke
 * the fiber's proc with the arguments passed to the first resume, then
 * terminate the fiber with the proc's return value.  Exceptions are
 * recorded on the thread and re-raised in the resuming fiber via the
 * interrupt flag.  Never returns. */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        /* multiple args arrive boxed in an Array (make_passing_arg) */
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        /* propagate the non-local exit to the fiber we return to */
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
01181
/* Lazily create the implicit root fiber representing the thread's
 * original execution context.  It is born RUNNING and seeds the
 * circular fiber list. */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
01199
/* Fiber.current: return the running fiber, creating the root fiber on
 * first use. */
VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}
01211
/* Save the state of the currently running fiber and switch execution
 * to +next_fib+.  Returns (much later, when something switches back to
 * this fiber) the value passed across that switch; with setjmp-based
 * fibers the first return is Qundef and the caller performs the actual
 * jump. */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        cont_save_thread(&fib->cont, th);
    }
    else {
        /* create current fiber */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
        /* non-zero return: we were longjmp'ed back into — resumed */
#else /* FIBER_USE_NATIVE */
    {
        /* native switch; execution continues here when we are resumed */
        fiber_setcontext(next_fib, fib);
#ifndef _WIN32
        if (terminated_machine_stack.ptr) {
            /* recycle the machine stack of the fiber that just died, or
             * unmap it when the cache is already full */
            if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
                machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
                machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
                machine_stack_cache_index++;
            }
            else {
                if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
                    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
                }
                else {
                    rb_bug("terminated fiber resumed");
                }
            }
            terminated_machine_stack.ptr = NULL;
            terminated_machine_stack.size = 0;
        }
#endif
#endif
        /* th->fiber may differ from fib if control moved while we slept */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
        /* initial setjmp return: caller must cont_restore_0 to switch */
        return Qundef;
    }
#endif
}
01266
/* Common implementation of Fiber#resume and Fiber#transfer: validate
 * the target, record the caller for Fiber.yield when resuming, pass
 * argc/argv as the switch value, and transfer control.  Returns the
 * value eventually passed back into the calling fiber. */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        /* dead fiber: raise FiberError in the caller; if the caller is
         * itself terminated, deliver the error to a live fiber (its
         * prev or the root) by switching there with argc == -1 */
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;        /* marker: value is an exception to raise */
        cont->value = value;
#if FIBER_USE_NATIVE
        {
            VALUE oldfibval;
            rb_fiber_t *oldfib;
            oldfibval = rb_fiber_current();
            GetFiberPtr(oldfibval, oldfib);
            fiber_setcontext(fib, oldfib);
        }
#else
        cont_restore_0(cont, &value);
#endif
        /* unreachable */
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    /* Qundef means "state saved, now jump": perform the actual switch */
    if (value == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS();

    return value;
}
01330
01331 VALUE
01332 rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
01333 {
01334 return fiber_switch(fib, argc, argv, 0);
01335 }
01336
01337 VALUE
01338 rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
01339 {
01340 rb_fiber_t *fib;
01341 GetFiberPtr(fibval, fib);
01342
01343 if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
01344 rb_raise(rb_eFiberError, "double resume");
01345 }
01346
01347 return fiber_switch(fibval, argc, argv, 1);
01348 }
01349
01350 VALUE
01351 rb_fiber_yield(int argc, VALUE *argv)
01352 {
01353 return rb_fiber_transfer(return_fiber(), argc, argv);
01354 }
01355
01356 void
01357 rb_fiber_reset_root_local_storage(VALUE thval)
01358 {
01359 rb_thread_t *th;
01360 rb_fiber_t *fib;
01361
01362 GetThreadPtr(thval, th);
01363 if (th->root_fiber && th->root_fiber != th->fiber) {
01364 GetFiberPtr(th->root_fiber, fib);
01365 th->local_storage = fib->cont.saved_thread.local_storage;
01366 }
01367 }
01368
01369
01370
01371
01372
01373
01374
01375
01376
01377
01378 VALUE
01379 rb_fiber_alive_p(VALUE fibval)
01380 {
01381 rb_fiber_t *fib;
01382 GetFiberPtr(fibval, fib);
01383 return fib->status != TERMINATED ? Qtrue : Qfalse;
01384 }
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394
01395
01396
01397
01398
01399
01400
01401 static VALUE
01402 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
01403 {
01404 return rb_fiber_resume(fib, argc, argv);
01405 }
01406
01407
01408
01409
01410
01411
01412
01413
01414
01415
01416
01417
01418
01419
01420
01421
01422
01423
01424
01425 static VALUE
01426 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
01427 {
01428 return rb_fiber_transfer(fib, argc, argv);
01429 }
01430
01431
01432
01433
01434
01435
01436
01437
01438
01439
01440
01441 static VALUE
01442 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
01443 {
01444 return rb_fiber_yield(argc, argv);
01445 }
01446
01447
01448
01449
01450
01451
01452
01453
01454
01455 static VALUE
01456 rb_fiber_s_current(VALUE klass)
01457 {
01458 return rb_fiber_current();
01459 }
01460
01461
01462
01463
01464
01465
01466
01467
01468
01469
01470
01471
01472
01473
01474
01475
/*
 * Initialize the Fiber subsystem: query platform parameters needed by
 * the native-fiber machine-stack allocator, then register the Fiber
 * class, FiberError, and the core Fiber methods.
 */
void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

    /* pagesize is used by the mmap-based stack allocator (RB_PAGE_SIZE). */
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
01499
01500 #if defined __GNUC__ && __GNUC__ >= 4
01501 #pragma GCC visibility push(default)
01502 #endif
01503
/*
 * Register the Continuation class and the global callcc function.
 * Kept separate (and exported) so continuation support can be loaded
 * on demand rather than at interpreter startup.
 */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    /* Continuations are only created via callcc, never allocated directly. */
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
01514
/*
 * Register the coroutine-style Fiber API (transfer/alive?/current),
 * loaded on demand separately from the core resume/yield interface.
 */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
01522
01523 #if defined __GNUC__ && __GNUC__ >= 4
01524 #pragma GCC visibility pop
01525 #endif
01526