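/*
 * thread_pthread.c: native thread support for POSIX systems.
 *
 * Implements the GVL (global VM lock), mutex/condition-variable
 * wrappers, per-thread machine stack discovery, and the timer thread
 * that drives timeslice scheduling.
 */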
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "gc.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond, int flags);
static void native_cond_destroy(rb_thread_cond_t *cond);
static pthread_t timer_thread_id;

#define RB_CONDATTR_CLOCK_MONOTONIC 1
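/*
 * Use CLOCK_MONOTONIC for condition-variable timed waits where
 * pthread_condattr_setclock() is available, so that timeouts are not
 * disturbed by wall-clock adjustments (NTP steps, settimeofday()).
 */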
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && defined(HAVE_CLOCK_GETTIME)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

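/*
 * GVL: only the thread that holds vm->gvl may execute Ruby code.
 * The first contended waiter wakes the timer thread, which then
 * forces periodic timeslices.  A typical caller pairs the two
 * operations:
 *
 *     gvl_acquire(vm, th);   // blocks until the GVL is free
 *     // ... run Ruby code ...
 *     gvl_release(vm);       // wakes one waiter, if any
 */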
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        /* GVL is held by another thread; register as a waiter */
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /* the first waiter wakes the timer thread, which starts
             * forcing periodic timeslices */
            rb_thread_wakeup_timer_thread();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        /* complete the handshake with a thread blocked in gvl_yield() */
        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

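/*
 * gvl_yield: give other threads a chance to run.  The need_yield /
 * wait_yield flags form a handshake with gvl_acquire_common() so the
 * yielding thread cannot simply reacquire the GVL before a waiter has
 * actually run.
 */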
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* another thread is already yielding; wait for it to finish */
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
        /* hand off: wait until a waiter has actually taken the GVL */
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        /* nobody is waiting for the GVL; yield the CPU instead */
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    vm->gvl.acquired = 0;
    vm->gvl.waiting = 0;
    vm->gvl.need_yield = 0;
    vm->gvl.wait_yield = 0;
}

static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}

static void
gvl_atfork(rb_vm_t *vm)
{
    /* after fork, only the forking thread survives: reset the GVL
     * state and let the surviving thread take it */
    gvl_init(vm);
    gvl_acquire(vm, GET_THREAD());
}

#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
    int r;
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

#if USE_MONOTONIC_COND
    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
        /* on failure, fall back to the default CLOCK_REALTIME */
    }
#endif

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

static void
native_cond_destroy(rb_thread_cond_t *cond)
{
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}
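/*
 * pthread_cond_signal()/pthread_cond_broadcast() may return EAGAIN on
 * some platforms (notably OS X 10.7) after an internal retry limit is
 * exceeded, so both wrappers below loop until the call succeeds.
 */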
static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;

    /*
     * POSIX specifies that pthread_cond_timedwait() never returns
     * EINTR, but some older kernels have been observed to return it
     * anyway; retry in that case.
     */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error Cannot find an integer type with the same size as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))

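/*
 * Convert a relative timeout into an absolute deadline on the clock
 * the condition variable was initialized with, clamping to TIMET_MAX
 * on overflow.  A minimal usage sketch (assuming `lock` is held):
 *
 *     struct timespec rel = { 1, 500000000 };   // 1.5 seconds
 *     struct timespec end = native_cond_timeout(cond, rel);
 *     int r = native_cond_timedwait(cond, lock, &end);
 *     // r == ETIMEDOUT once the deadline passes
 */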
static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %d", cond->clockid);
#endif

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_thread_lock_t signal_thread_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

static void
null_func(int i)
{
    /* null: a do-nothing SIGVTALRM handler */
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
    posix_signal(SIGVTALRM, null_func);
}

static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}

static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}

#define USE_THREAD_CACHE 0

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifdef STACKADDR_AVAILABLE

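/*
 * get_stack: fetch the base address and size of the calling thread's
 * stack, using whichever platform-specific API is available.
 */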
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* Mac OS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif

static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
        size_t size = 0;
        size_t space = 0;
#if defined(STACKADDR_AVAILABLE)
        void* stackaddr;
        STACK_GROW_DIR_DETECTION;
        get_stack(&stackaddr, &size);
        space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr, (char *)stackaddr - (char *)addr);
#elif defined(HAVE_GETRLIMIT)
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        space = size > 5 * 1024 * 1024 ? 1024 * 1024 : size / 5;
#endif
        native_main_thread.stack_maxsize = size - space;
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    th->machine_stack_maxsize /= 2;
    th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif
    return 0;
}

#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run the Ruby-level thread body */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* park this native thread in the cache, waiting to be reused */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}

struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};

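/*
 * Thread cache (disabled by default; see USE_THREAD_CACHE): an
 * exiting native thread parks for up to 60 seconds waiting to be
 * handed the next Ruby thread, saving a pthread_create().
 */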
#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct cached_thread_entry *entry =
        (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    struct timeval tv;
    struct timespec ts;

    if (entry == 0) return 0; /* no cache slot on OOM; let the thread exit */

    /* park for at most 60 seconds */
    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        /* unlink this entry from the cache list */
        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}

enum {
#ifdef __SYMBIAN32__
    RUBY_STACK_MIN_LIMIT = 64 * 1024,
#else
    RUBY_STACK_MIN_LIMIT = 512 * 1024,
#endif
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024
};

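/*
 * Stack sizing for new threads: at least RUBY_STACK_MIN_LIMIT (and at
 * least twice PTHREAD_STACK_MIN); one fifth of that, capped at
 * RUBY_STACK_SPACE_LIMIT, is reserved as overflow-detection margin.
 */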
#ifdef PTHREAD_STACK_MIN
#define RUBY_STACK_MIN ((RUBY_STACK_MIN_LIMIT < PTHREAD_STACK_MIN) ? \
                        PTHREAD_STACK_MIN * 2 : RUBY_STACK_MIN_LIMIT)
#else
#define RUBY_STACK_MIN (RUBY_STACK_MIN_LIMIT)
#endif
#define RUBY_STACK_SPACE (RUBY_STACK_MIN/5 > RUBY_STACK_SPACE_LIMIT ? \
                          RUBY_STACK_SPACE_LIMIT : RUBY_STACK_MIN/5)

static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = RUBY_STACK_MIN;
        const size_t space = RUBY_STACK_SPACE;

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

        CHECK_ERR(pthread_attr_init(&attr));

#ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
#endif

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
#endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
        CHECK_ERR(pthread_attr_destroy(&attr));
    }
    return err;
}

static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else
    /* not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}

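/*
 * native_sleep: sleep on the thread's sleep_cond with the GVL
 * released.  th->unblock is set so that an interrupt can wake the
 * sleeper through ubf_pthread_cond_signal() above.
 */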
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /*
         * Clamp huge timeouts: some systems reject absolute deadlines
         * too far in the future (e.g. Solaris cond_timedwait() returns
         * EINVAL beyond current time + 100,000,000 seconds).  Waking
         * early here is just a spurious wakeup, which callers of
         * native_sleep() must tolerate anyway.
         */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted; return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}

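/*
 * Signal thread list: threads blocked in select() and similar calls
 * register themselves here; the timer thread pings each entry with
 * SIGVTALRM so the blocking call returns with EINTR and the thread
 * can notice pending interrupts.
 */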
#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
        signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif

static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                (struct signal_thread_list *)
                th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list);
        });
    }
}

static void
ubf_select_each(rb_thread_t *th)
{
    if (th) {
        thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread();
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* USE_SIGNAL_THREAD_LIST */
static void add_signal_thread_list(rb_thread_t *th) { }
static void remove_signal_thread_list(rb_thread_t *th) { }
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */

static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_owner_process;

#define TT_DEBUG 0

#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

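/*
 * Wake the timer thread by writing one byte to its pipe (the
 * self-pipe idiom).  write() is async-signal-safe, so this may be
 * called from signal handlers; the pid check skips stale pipes
 * inherited across fork().
 */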
void
rb_thread_wakeup_timer_thread(void)
{
    ssize_t result;

    /* only the process that created the pipe may write to it */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(timer_thread_pipe[1], buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        /* the pipe belongs to another process (e.g. after fork,
         * before it is re-created); nothing to wake */
    }
}

static void
consume_communication_pipe(void)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the drained bytes carry no data; a shared static buffer is fine */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

  retry:
    result = read(timer_thread_pipe[0], buff, CCP_READ_BUFF_SIZE);
    if (result < 0) {
        switch (errno) {
          case EINTR: goto retry;
          default:
            rb_async_bug_errno("consume_communication_pipe: read", errno);
        }
    }
}

static void
close_communication_pipe(void)
{
    if (close(timer_thread_pipe[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(timer_thread_pipe[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    timer_thread_pipe[0] = timer_thread_pipe[1] = -1;
}

/* the timer thread's polling interval: one timeslice is 100ms */
#define TIME_QUANTUM_USEC (100 * 1000)

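/*
 * thread_timer: body of the timer thread.  Each iteration pings
 * threads parked in blocking calls, runs timer_thread_function(), and
 * then sleeps in select() on the communication pipe: with a
 * TIME_QUANTUM_USEC timeout while any thread waits for the GVL, or
 * indefinitely when the VM is idle.
 */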
static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
    int result;
    struct timeval timeout;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    while (system_working > 0) {
        fd_set rfds;
        int need_polling;

        /* deliver pending interrupts and drive the scheduler tick */
        ping_signal_thread_list();
        timer_thread_function(0);
        need_polling = check_signal_thread_list();

        if (TT_DEBUG) WRITE_CONST(2, "tick\n");

        /* wait for a wakeup byte on the communication pipe */
        FD_ZERO(&rfds);
        FD_SET(timer_thread_pipe[0], &rfds);

        if (gvl->waiting > 0 || need_polling) {
            timeout.tv_sec = 0;
            timeout.tv_usec = TIME_QUANTUM_USEC;

            /* polling mode: time out every quantum to force timeslices */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, &timeout);
        }
        else {
            /* idle mode: sleep until somebody wakes us via the pipe */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, 0);
        }

        if (result == 0) {
            /* timeout */
        }
        else if (result > 0) {
            consume_communication_pipe();
        }
        else {
            switch (errno) {
              case EBADF:
              case EINVAL:
              case ENOMEM:
              case EFAULT:
                rb_async_bug_errno("thread_timer: select", errno);
              default:
                break; /* ignore other errors, e.g. EINTR */
            }
        }
    }

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}

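/*
 * rb_thread_create_timer_thread: (re)create the communication pipe if
 * this process does not yet own one (e.g. right after fork), mark the
 * write end non-blocking and both ends close-on-exec, then start the
 * timer thread on a small dedicated stack.
 */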
static void
rb_thread_create_timer_thread(void)
{
    rb_enable_interrupt();

    if (!timer_thread_id) {
        pthread_attr_t attr;
        int err;

        pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
        {
            /*
             * Use at least 16KB (4 pages) for the timer thread's
             * stack; a bare PTHREAD_STACK_MIN stack has been observed
             * to overflow on some platforms.
             */
            const size_t min_size = (4096 * 4);
            size_t stack_size = PTHREAD_STACK_MIN;
            if (stack_size < min_size) stack_size = min_size;
            if (THREAD_DEBUG) stack_size += BUFSIZ;
            pthread_attr_setstacksize(&attr, stack_size);
        }
#endif

        /* (re)create the communication pipe if this process does not own it */
        if (timer_thread_pipe_owner_process != getpid()) {
            if (timer_thread_pipe[0] != -1) {
                /* close the pipe inherited from the parent process */
                close_communication_pipe();
            }

            err = pipe(timer_thread_pipe);
            if (err != 0) {
                rb_bug_errno("thread_timer: Failed to create communication pipe for timer thread", errno);
            }
            rb_update_max_fd(timer_thread_pipe[0]);
            rb_update_max_fd(timer_thread_pipe[1]);
#if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL)
            {
                int oflags;
#if defined(O_NONBLOCK)
                /* never block in a signal handler writing the wakeup byte */
                oflags = fcntl(timer_thread_pipe[1], F_GETFL);
                oflags |= O_NONBLOCK;
                fcntl(timer_thread_pipe[1], F_SETFL, oflags);
#endif
#if defined(FD_CLOEXEC)
                oflags = fcntl(timer_thread_pipe[0], F_GETFD);
                fcntl(timer_thread_pipe[0], F_SETFD, oflags | FD_CLOEXEC);
                oflags = fcntl(timer_thread_pipe[1], F_GETFD);
                fcntl(timer_thread_pipe[1], F_SETFD, oflags | FD_CLOEXEC);
#endif
            }
#endif

            /* this process owns the pipe from now on */
            timer_thread_pipe_owner_process = getpid();
        }

        /* create the timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
        pthread_attr_destroy(&attr);
    }

    rb_disable_interrupt();
}

static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* wake the timer thread so it sees system_working <= 0 and exits */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        if (close_anyway) {
            /*
             * The communication pipe is intentionally left open:
             * closing it here could race with late wakeup attempts
             * from signal handlers during shutdown, and the OS
             * reclaims the descriptors at process exit anyway.
             */
        }
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}

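/*
 * ruby_stack_overflowed_p: report whether addr falls within the
 * watermark region (one fifth of the stack, at most 1MB) at the
 * overflow end of the thread's machine stack.
 */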
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    size /= 5;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif

int
rb_reserved_fd_p(int fd)
{
    /* the timer thread's pipe FDs are reserved; user code must not
     * close or reuse them */
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1]) {
        return 1;
    }
    else {
        return 0;
    }
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */