thread.c

Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   thread.c -
00004 
00005   $Author: yugui $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 /*
00012   YARV Thread Design
00013 
00014   model 1: Userlevel Thread
00015     Same as traditional ruby thread.
00016 
00017   model 2: Native Thread with Global VM lock
00018     Using pthread (or Windows thread) and Ruby threads run concurrent.
00019 
00020   model 3: Native Thread with fine grain lock
00021     Using pthread and Ruby threads run concurrent or parallel.
00022 
00023 ------------------------------------------------------------------------
00024 
  model 2:
    Only the thread that holds the GVL (Global VM Lock, also called the
    Giant VM Lock) can run.  When scheduling, the running thread releases
    the GVL.  Before a blocking operation, a thread must release the GVL
    so that another thread can continue.  After the blocking operation,
    the thread must check for interrupts (RUBY_VM_CHECK_INTS).
00031 
00032     Every VM can run parallel.
00033 
00034     Ruby threads are scheduled by OS thread scheduler.
00035 
00036 ------------------------------------------------------------------------
00037 
  model 3:
    Every thread runs concurrently or in parallel, so exclusive access
    control is needed for shared objects.  For example, to access a String
    or Array object, a fine-grained lock must be taken every time.
00042  */
00043 
00044 
00045 /* for model 2 */
00046 
00047 #include "eval_intern.h"
00048 #include "gc.h"
00049 
00050 #ifndef USE_NATIVE_THREAD_PRIORITY
00051 #define USE_NATIVE_THREAD_PRIORITY 0
00052 #define RUBY_THREAD_PRIORITY_MAX 3
00053 #define RUBY_THREAD_PRIORITY_MIN -3
00054 #endif
00055 
00056 #ifndef THREAD_DEBUG
00057 #define THREAD_DEBUG 0
00058 #endif
00059 
00060 VALUE rb_cMutex;
00061 VALUE rb_cBarrier;
00062 
00063 static void sleep_timeval(rb_thread_t *th, struct timeval time);
00064 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
00065 static void sleep_forever(rb_thread_t *th, int nodeadlock);
00066 static double timeofday(void);
00067 struct timeval rb_time_interval(VALUE);
00068 static int rb_threadptr_dead(rb_thread_t *th);
00069 
00070 static void rb_check_deadlock(rb_vm_t *vm);
00071 
00072 int rb_signal_buff_size(void);
00073 void rb_signal_exec(rb_thread_t *th, int sig);
00074 void rb_disable_interrupt(void);
00075 void rb_thread_stop_timer_thread(void);
00076 
00077 static const VALUE eKillSignal = INT2FIX(0);
00078 static const VALUE eTerminateSignal = INT2FIX(1);
00079 static volatile int system_working = 1;
00080 
00081 inline static void
00082 st_delete_wrap(st_table *table, st_data_t key)
00083 {
00084     st_delete(table, &key, 0);
00085 }
00086 
00087 /********************************************************************************/
00088 
00089 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
00090 
/* State saved on entry to a blocking region: the thread status to restore
 * and the unblock callback that was in effect before the region began. */
struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
    struct rb_unblock_callback oldubf;
};

static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                                 struct rb_unblock_callback *old);
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);

static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

/* Record TH's machine context and stack end so the GC can scan this
 * thread's stack while it is not running Ruby code. */
#define RB_GC_SAVE_MACHINE_CONTEXT(th) \
  do { \
    rb_gc_save_machine_context(th); \
    SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
  } while (0)
00107 
/* GVL_UNLOCK_BEGIN/END bracket a region executed without the GVL.
 * BEGIN opens a do-block and declares _th_stored; END closes it, so the
 * two macros must always appear as a lexically matched pair. */
#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
  native_mutex_unlock(&_th_stored->vm->global_vm_lock)

#define GVL_UNLOCK_END() \
  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
  rb_thread_set_current(_th_stored); \
} while(0)

/* Run `exec` with the GVL released.  The trailing semicolon after
 * while(0) was removed: the caller supplies it, which keeps the macro
 * usable as a single statement inside if/else bodies. */
#define BLOCKING_REGION_CORE(exec) do { \
    GVL_UNLOCK_BEGIN(); {\
            exec; \
    } \
    GVL_UNLOCK_END(); \
} while(0)
00124 
/* Enter a blocking region for TH: save the previous status and unblock
 * callback into REGION, install FUNC/ARG as the new unblock callback,
 * mark the thread stopped, save its machine context for GC, and release
 * the GVL.  Must be paired with blocking_region_end(). */
#define blocking_region_begin(th, region, func, arg) \
  do { \
    (region)->prev_status = (th)->status; \
    set_unblock_function((th), (func), (arg), &(region)->oldubf); \
    (th)->blocking_region_buffer = (region); \
    (th)->status = THREAD_STOPPED; \
    thread_debug("enter blocking region (%p)\n", (void *)(th)); \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    native_mutex_unlock(&(th)->vm->global_vm_lock); \
  } while (0)
00135 
/* Execute `exec` as a blocking operation: register `ubf`/`ubfarg` as the
 * unblock callback, release the GVL around `exec`, re-acquire it, and
 * check pending interrupts.  The macro-local names were renamed away
 * from the double-underscore prefix, which C reserves for the
 * implementation (C99 7.1.3). */
#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *blocking_th = GET_THREAD(); \
    struct rb_blocking_region_buffer blocking_buf; \
    blocking_region_begin(blocking_th, &blocking_buf, ubf, ubfarg); \
    exec; \
    blocking_region_end(blocking_th, &blocking_buf); \
    RUBY_VM_CHECK_INTS(); \
} while(0)
00144 
/* Debug-logging plumbing.  With THREAD_DEBUG enabled, thread_debug()
 * forwards to rb_thread_debug(), optionally prefixing file:line when the
 * compiler supports variadic macros.  When disabled, thread_debug
 * compiles to a never-executed printf so its arguments still type-check. */
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# if THREAD_DEBUG < 0
/* Runtime-adjustable debug level (only when compiled with THREAD_DEBUG=-1). */
static int rb_thread_debug_enabled;

/*
 *  call-seq:
 *     Thread.DEBUG     -> num
 *
 *  Returns the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/*
 *  call-seq:
 *     Thread.DEBUG = num
 *
 *  Sets the thread debug level.  Available only if compiled with
 *  THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    /* Any falsy value resets the level to 0. */
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif
00195 
/* On non-IA64 targets the register-stack argument is unused; this macro
 * collapses the three-argument call form onto the two-argument one. */
#ifndef __ia64
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
static void timer_thread_function(void *);
00202 
/* Pull in the platform-specific thread implementation and define
 * DEBUG_OUT(), which prints `buf` (prepared by rb_thread_debug) tagged
 * with the native thread id, under a mutex so lines do not interleave. */
#if   defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif
00224 
#if THREAD_DEBUG
/* 1 while debug_mutex still needs its lazy first-use initialization. */
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

/* printf-style debug logger; formats into a local buffer and emits it
 * via the platform DEBUG_OUT() macro.  No-op unless the debug level is
 * enabled. */
void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    /* NOTE(review): this check-then-init of debug_mutex is not race-safe
     * if two threads log before the first initialization completes --
     * confirm early debug output is single-threaded. */
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif
00253 
/* Public wrapper: unlock a native thread lock. */
void
rb_thread_lock_unlock(rb_thread_lock_t *lock)
{
    native_mutex_unlock(lock);
}
00259 
/* Public wrapper: destroy a native thread lock. */
void
rb_thread_lock_destroy(rb_thread_lock_t *lock)
{
    native_mutex_destroy(lock);
}
00265 
/* Install FUNC/ARG as TH's unblock callback, saving the previous callback
 * into *OLD when OLD is non-NULL.  Loops (via goto) until no interrupt is
 * pending while holding interrupt_lock, so an interrupt arriving during
 * the swap cannot be lost. */
static void
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     struct rb_unblock_callback *old)
{
  check_ints:
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        /* A pending interrupt slipped in: drop the lock, service it, retry. */
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);
}
00284 
/* Restore TH's unblock callback from *OLD under interrupt_lock. */
static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}
00292 
00293 void
00294 rb_threadptr_interrupt(rb_thread_t *th)
00295 {
00296     native_mutex_lock(&th->interrupt_lock);
00297     RUBY_VM_SET_INTERRUPT(th);
00298     if (th->unblock.func) {
00299         (th->unblock.func)(th->unblock.arg);
00300     }
00301     else {
00302         /* none */
00303     }
00304     native_mutex_unlock(&th->interrupt_lock);
00305 }
00306 
00307 
00308 static int
00309 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
00310 {
00311     VALUE thval = key;
00312     rb_thread_t *th;
00313     GetThreadPtr(thval, th);
00314 
00315     if (th != main_thread) {
00316         thread_debug("terminate_i: %p\n", (void *)th);
00317         rb_threadptr_interrupt(th);
00318         th->thrown_errinfo = eTerminateSignal;
00319         th->status = THREAD_TO_KILL;
00320     }
00321     else {
00322         thread_debug("terminate_i: main thread (%p)\n", (void *)th);
00323     }
00324     return ST_CONTINUE;
00325 }
00326 
/* Ruby Mutex internals: a native lock plus a condition variable, the
 * owning thread (NULL when unlocked), waiter bookkeeping, and a link in
 * the owner's list of held mutexes. */
typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;
    rb_thread_cond_t cond;
    struct rb_thread_struct volatile *th;   /* current owner, if any */
    volatile int cond_waiting, cond_notified;
    struct rb_mutex_struct *next_mutex;     /* next mutex held by owner */
} mutex_t;

static void rb_mutex_unlock_all(mutex_t *mutex, rb_thread_t *th);
static void rb_mutex_abandon_all(mutex_t *mutexes);
00338 
/* Ask every thread except the main thread to terminate, then keep
 * yielding until the main thread is the only one left.  Must be called
 * on the main thread; stops the timer thread at the end. */
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    if (th->keeping_mutexes) {
        rb_mutex_unlock_all(th->keeping_mutexes, GET_THREAD());
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    /* Exceptions raised while waiting are swallowed so shutdown proceeds. */
    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
    rb_thread_stop_timer_thread();
}
00370 
00371 static void
00372 thread_unlock_all_locking_mutexes(rb_thread_t *th)
00373 {
00374     if (th->keeping_mutexes) {
00375         rb_mutex_unlock_all(th->keeping_mutexes, th);
00376         th->keeping_mutexes = NULL;
00377     }
00378 }
00379 
00380 static void
00381 thread_cleanup_func_before_exec(void *th_ptr)
00382 {
00383     rb_thread_t *th = th_ptr;
00384     th->status = THREAD_KILLED;
00385     th->machine_stack_start = th->machine_stack_end = 0;
00386 #ifdef __ia64
00387     th->machine_register_stack_start = th->machine_register_stack_end = 0;
00388 #endif
00389 }
00390 
/* Full per-thread cleanup: clear the mutex the thread was blocked on,
 * run the pre-exec cleanup, then destroy the native thread resources. */
static void
thread_cleanup_func(void *th_ptr)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);
    native_thread_destroy(th);
}
00400 
extern void ruby_error_print(void);
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
void rb_thread_recycle_stack_release(VALUE *);

/* Delegate machine-stack setup for TH to the platform layer. */
void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}
00410 
/* Body of every newly created Ruby thread.  Records the stack bounds,
 * acquires the GVL, runs either first_func (C entry) or first_proc (Ruby
 * block), then handles the thread's exit: error propagation to the main
 * thread, removal from living_threads, waking joiners, stack recycling,
 * and final cleanup.  Returns 0. */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
# ifdef USE_SIGALTSTACK
    void rb_register_sigaltstack(rb_thread_t *th);

    rb_register_sigaltstack(th);
# endif

    ruby_thread_set_native(th);

    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", (void *)th);

    native_mutex_lock(&th->vm->global_vm_lock);
    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (!th->first_func) {
                    /* Ruby-level thread: invoke the stored proc with args. */
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
                                                  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    /* C-level thread (rb_thread_create): call the C function. */
                    th->value = (*th->first_func)((void *)args);
                }
            });
        }
        else {
            /* Non-local exit: decide whether the error propagates. */
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                if (th->safe_level >= 4) {
                    th->errinfo = rb_exc_new3(rb_eSecurityError,
                                              rb_sprintf("Insecure exit at level %d", th->safe_level));
                    errinfo = Qnil;
                }
            }
            else if (th->safe_level < 4 &&
                     (th->vm->thread_abort_on_exception ||
                      th->abort_on_exception || RTEST(ruby_debug))) {
                /* exit on main_thread */
            }
            else {
                /* swallowed; the error surfaces only via Thread#join/#value */
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* treat with normal error object */
                rb_threadptr_raise(main_th, 1, &errinfo);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        if (th != main_th) {
            st_delete_wrap(th->vm->living_threads, th->self);
        }

        /* wake up joining threads */
        join_th = th->join_list_head;
        while (join_th) {
            /* the main thread joining us must not see a swallowed error */
            if (join_th == main_th) errinfo = Qnil;
            rb_threadptr_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }

        /* recycle the VM stack unless a fiber still owns it */
        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    thread_unlock_all_locking_mutexes(th);
    if (th != main_th) rb_check_deadlock(th->vm);
    if (th->vm->main_thread == th) {
        ruby_cleanup(state);
    }
    else {
        thread_cleanup_func(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
    }

    return 0;
}
00532 
/* Common Thread creation path: set up THVAL's rb_thread_t (C function FN
 * or, when FN is 0, the current block as proc), inherit priority and
 * thread group, register it in living_threads, and start the native
 * thread.  Raises ThreadError on failure.  Returns THVAL. */
static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;
    int err;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    if (GET_VM()->event_hooks != NULL)
        th->event_flags |= RUBY_EVENT_VM;

    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    err = native_thread_create(th);
    if (err) {
        /* roll back the living_threads registration before raising */
        st_delete_wrap(th->vm->living_threads, th->self);
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
    }
    return thval;
}
00567 
00568 /* :nodoc: */
00569 static VALUE
00570 thread_s_new(int argc, VALUE *argv, VALUE klass)
00571 {
00572     rb_thread_t *th;
00573     VALUE thread = rb_thread_alloc(klass);
00574     rb_obj_call_init(thread, argc, argv);
00575     GetThreadPtr(thread, th);
00576     if (!th->first_args) {
00577         rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
00578                  rb_class2name(klass));
00579     }
00580     return thread;
00581 }
00582 
00583 /*
00584  *  call-seq:
00585  *     Thread.start([args]*) {|args| block }   -> thread
00586  *     Thread.fork([args]*) {|args| block }    -> thread
00587  *
00588  *  Basically the same as <code>Thread::new</code>. However, if class
00589  *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
00590  *  subclass will not invoke the subclass's <code>initialize</code> method.
00591  */
00592 
static VALUE
thread_start(VALUE klass, VALUE args)
{
    /* Allocate without calling #initialize; fn==0 means run the block. */
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}
00598 
00599 /* :nodoc: */
/* Thread#initialize: requires a block; rejects re-initialization of an
 * already-armed thread, reporting the original block's source location
 * when it is known. */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        /* block-scope prototype for the proc-location helper */
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
00625 
/* C API: create and start a Thread running FN(ARG). */
VALUE
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}
00631 
00632 
/* +infty, for this purpose */
#define DELAY_INFTY 1E30

/* Arguments threaded through rb_ensure in thread_join: the thread being
 * joined, the waiter, the absolute deadline, and whether to wait forever. */
struct join_arg {
    rb_thread_t *target, *waiting;
    double limit;       /* absolute deadline from timeofday() */
    int forever;        /* nonzero when no time limit was given */
};
00641 
/* ensure-handler for thread_join: unlink the waiting thread from the
 * target's join list (skipped when the target already died, since its
 * join list is then being torn down by the target itself). */
static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        /* pointer-to-pointer walk so head and interior removals are uniform */
        rb_thread_t **pth = &target_th->join_list_head;

        while (*pth) {
            if (*pth == th) {
                *pth = th->join_list_next;
                break;
            }
            pth = &(*pth)->join_list_next;
        }
    }

    return Qnil;
}
00662 
/* Sleep loop of thread_join: block until the target dies, either forever
 * or until the absolute deadline.  Returns Qtrue when the target died,
 * Qfalse on timeout. */
static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            sleep_forever(th, 1);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now);
        }
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}
00688 
/* Join TARGET_TH with a timeout of DELAY seconds (DELAY_INFTY = forever).
 * Registers the caller on the target's join list, sleeps until the target
 * dies or the deadline passes, then re-raises any error the target left
 * behind.  Returns the target's Thread object, or Qnil on timeout. */
static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        /* push self onto the target's join list; the ensure-handler
         * removes us again even if the sleep is interrupted */
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* kill/terminate signal markers: nothing to re-raise */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            /* throw-object (e.g. break/return): convert to LocalJumpError */
            rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
00731 
00732 /*
00733  *  call-seq:
00734  *     thr.join          -> thr
00735  *     thr.join(limit)   -> thr
00736  *
00737  *  The calling thread will suspend execution and run <i>thr</i>. Does not
00738  *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
00739  *  the time limit expires, <code>nil</code> will be returned, otherwise
00740  *  <i>thr</i> is returned.
00741  *
00742  *  Any threads not joined will be killed when the main program exits.  If
00743  *  <i>thr</i> had previously raised an exception and the
00744  *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
00745  *  (so the exception has not yet been processed) it will be processed at this
00746  *  time.
00747  *
00748  *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
00749  *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
00750  *     x.join # Let x thread finish, a will be killed on exit.
00751  *
00752  *  <em>produces:</em>
00753  *
00754  *     axyz
00755  *
00756  *  The following example illustrates the <i>limit</i> parameter.
00757  *
00758  *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
00759  *     puts "Waiting" until y.join(0.15)
00760  *
00761  *  <em>produces:</em>
00762  *
00763  *     tick...
00764  *     Waiting
00765  *     tick...
00766  *     Waitingtick...
00767  *
00768  *
00769  *     tick...
00770  */
00771 
00772 static VALUE
00773 thread_join_m(int argc, VALUE *argv, VALUE self)
00774 {
00775     rb_thread_t *target_th;
00776     double delay = DELAY_INFTY;
00777     VALUE limit;
00778 
00779     GetThreadPtr(self, target_th);
00780 
00781     rb_scan_args(argc, argv, "01", &limit);
00782     if (!NIL_P(limit)) {
00783         delay = rb_num2dbl(limit);
00784     }
00785 
00786     return thread_join(target_th, delay);
00787 }
00788 
00789 /*
00790  *  call-seq:
00791  *     thr.value   -> obj
00792  *
00793  *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
00794  *  its value.
00795  *
00796  *     a = Thread.new { 2 + 2 }
00797  *     a.value   #=> 4
00798  */
00799 
/* Thread#value: join without limit, then return the thread's result. */
static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);
    return th->value;
}
00808 
00809 /*
00810  * Thread Scheduling
00811  */
00812 
/* Convert a duration in (possibly fractional, possibly negative) seconds
 * into a normalized struct timeval with 0 <= tv_usec < 1000000. */
static struct timeval
double2timeval(double d)
{
    struct timeval tv;
    int whole = (int)d;     /* truncate toward zero */

    tv.tv_sec = whole;
    tv.tv_usec = (int)((d - whole) * 1e6);
    if (tv.tv_usec < 0) {   /* negative fraction: borrow one second */
        tv.tv_usec += (int)1e6;
        tv.tv_sec -= 1;
    }
    return tv;
}
00826 
/* Sleep TH with no timeout.  When DEADLOCKABLE, the thread counts toward
 * the VM's sleeper total and deadlock detection runs before each sleep;
 * the loop re-sleeps after spurious wakeups until something changes the
 * status away from THREAD_STOPPED_FOREVER. */
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;

    th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == THREAD_STOPPED_FOREVER);
    th->status = prev_status;
}
00846 
/* Fetch the current time into *TP, preferring the monotonic clock (immune
 * to wall-clock adjustments) and falling back to gettimeofday.  Note the
 * if/else deliberately spans the #endif. */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    } else
#endif
    {
        gettimeofday(tp, NULL);
    }
}
00862 
/* Sleep TH for the duration TV.  Computes an absolute deadline and
 * re-sleeps for the remaining time after each interrupt/spurious wakeup
 * until the deadline passes or something changes the thread's status. */
static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    /* absolute deadline: to = now + tv, normalized */
    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, (long)to.tv_usec,
                     (long)tvn.tv_sec, (long)tvn.tv_usec);
        /* woke early: shrink tv to the time remaining until the deadline */
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}
00894 
/* Sleep the current thread indefinitely, exempt from deadlock detection. */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
00901 
/* Sleep the current thread indefinitely, participating in deadlock
 * detection (counts as a sleeper). */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
00908 
/* Current time as a double in seconds, using the monotonic clock when
 * available and gettimeofday otherwise. */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        return (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9;
    }
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
00925 
/* Sleep TH for SLEEPSEC seconds; wakes early on interrupt. */
static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
{
    sleep_timeval(th, double2timeval(sleepsec));
}
00931 
00932 static void
00933 sleep_for_polling(rb_thread_t *th)
00934 {
00935     struct timeval time;
00936     time.tv_sec = 0;
00937     time.tv_usec = 100 * 1000;  /* 0.1 sec */
00938     sleep_timeval(th, time);
00939 }
00940 
/* Public API: sleep the current thread for TIME. */
void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time);
}
00947 
/* Service interrupts, then nap for one polling interval -- but only when
 * other threads exist; a lone thread has nothing to yield to. */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        sleep_for_polling(th);
    }
}
00957 
00958 /*
00959  * CAUTION: This function causes thread switching.
00960  *          rb_thread_check_ints() check ruby's interrupts.
00961  *          some interrupt needs thread switching/invoke handlers,
00962  *          and so on.
00963  */
00964 
/* Public wrapper around RUBY_VM_CHECK_INTS; see the caution above --
 * this can switch threads and run interrupt handlers. */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
00970 
00971 /*
00972  * Hidden API for tcl/tk wrapper.
00973  * There is no guarantee to perpetuate it.
00974  */
/* Nonzero when the current thread has a signal queued for execution. */
int
rb_thread_check_trap_pending(void)
{
    return GET_THREAD()->exec_signal != 0;
}
00980 
/* This function can be called in blocking region. */
/* Nonzero when THVAL has a pending interrupt. */
int
rb_thread_interrupted(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return RUBY_VM_INTERRUPTED(th);
}
00989 
00990 struct timeval rb_time_timeval(VALUE);
00991 
/* Sleep the current thread for SEC whole seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
00997 
00998 static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
00999 
/*
 * Yield the processor to another Ruby thread (model 2): release the
 * GVL, let the native scheduler run someone else, then re-acquire it.
 * `sched_depth' prevents re-entering interrupt execution recursively:
 * pending interrupts are only processed when sched_depth == 0.
 */
static void
rb_thread_schedule_rec(int sched_depth)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        /* save machine context first so GC can scan this stack while
         * the GVL is released */
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
        {
            native_thread_yield();
        }
        native_mutex_lock(&th->vm->global_vm_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);
        }
    }
}
01024 
/* Public entry point: schedule with interrupt processing enabled. */
void
rb_thread_schedule(void)
{
    rb_thread_schedule_rec(0);
}
01030 
01031 /* blocking region */
01032 
/* blocking region */

/* Leave a blocking region: re-acquire the GVL, restore this thread as
 * the running one, and undo the bookkeeping done on entry.  The GVL
 * must be taken before touching any thread state. */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    native_mutex_lock(&th->vm->global_vm_lock);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    remove_signal_thread_list(th);
    th->blocking_region_buffer = 0;
    reset_unblock_function(th, &region->oldubf);
    /* restore the status saved at region entry (unless something else
     * already changed it away from STOPPED) */
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
01046 
/* Public API: enter a blocking region (release the GVL) and return a
 * heap-allocated region buffer; the caller must pass it back to
 * rb_thread_blocking_region_end(), which frees it. */
struct rb_blocking_region_buffer *
rb_thread_blocking_region_begin(void)
{
    rb_thread_t *th = GET_THREAD();
    struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
    blocking_region_begin(th, region, ubf_select, th);
    return region;
}
01055 
/* Public API: leave a blocking region begun with
 * rb_thread_blocking_region_begin() and free `region'.  errno is saved
 * and restored because re-acquiring the GVL / running interrupts may
 * clobber it. */
void
rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
{
    int saved_errno = errno;
    rb_thread_t *th = GET_THREAD();
    blocking_region_end(th, region);
    xfree(region);
    RUBY_VM_CHECK_INTS();
    errno = saved_errno;
}
01066 
01067 /*
01068  * rb_thread_blocking_region - permit concurrent/parallel execution.
01069  *
01070  * This function does:
01071  *   (1) release GVL.
01072  *       Other Ruby threads may run in parallel.
01073  *   (2) call func with data1.
01074  *   (3) acquire GVL.
01075  *       Other Ruby threads can not run in parallel any more.
01076  *
01077  *   If another thread interrupts this thread (Thread#kill, signal delivery,
01078  *   VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
01079  *   "un-blocking function").  `ubf()' should interrupt `func()' execution.
01080  *
01081  *   There are built-in ubfs and you can specify these ubfs.
01082  *   However, we can not guarantee our built-in ubfs interrupt
01083  *   your `func()' correctly.  Be careful to use rb_thread_blocking_region().
01084  *
01085  *     * RUBY_UBF_IO: ubf for IO operation
01086  *     * RUBY_UBF_PROCESS: ubf for process operation
01087  *
01088  *   NOTE: You can not execute most of Ruby C API and touch Ruby
01089  *         objects in `func()' and `ubf()', including raising an
01090  *         exception, because current thread doesn't acquire GVL
01091  *         (cause synchronization problem).  If you need to do it,
01092  *         read source code of C APIs and confirm by yourself.
01093  *
01094  *   NOTE: In short, this API is difficult to use safely.  I recommend you
01095  *         use other ways if you have.  We lack experiences to use this API.
01096  *         Please report your problem related on it.
01097  *
01098  *   Safe C API:
01099  *     * rb_thread_interrupted() - check interrupt flag
01100  *     * ruby_xalloc(), ruby_xrealloc(), ruby_xfree() -
01101  *         if they called without GVL, acquire GVL automatically.
01102  */
/* See the long commentary above: run `func(data1)' with the GVL
 * released; `ubf(data2)' may be invoked from another thread to
 * interrupt `func'. */
VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    /* the generic IO/process ubfs are both implemented by ubf_select
     * acting on the current thread */
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;  /* re-acquiring the GVL may change errno */
    }, ubf, data2);
    errno = saved_errno;

    return val;
}
01125 
01126 /* alias of rb_thread_blocking_region() */
01127 
/* Thin alias: identical contract to rb_thread_blocking_region(). */
VALUE
rb_thread_call_without_gvl(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    return rb_thread_blocking_region(func, data1, ubf, data2);
}
01135 
01136 /*
01137  * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
01138  *
01139  ***
01140  *** This API is EXPERIMENTAL!
01141  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01142  ***
01143  *
01144  * While releasing GVL using rb_thread_blocking_region() or
01145  * rb_thread_call_without_gvl(), you can not access Ruby values or invoke methods.
01146  * If you need to access it, you must use this function rb_thread_call_with_gvl().
01147  *
01148  * This function rb_thread_call_with_gvl() does:
01149  * (1) acquire GVL.
01150  * (2) call passed function `func'.
01151  * (3) release GVL.
01152  * (4) return a value which is returned at (2).
01153  *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
01156  *
01157  * NOTE: If an exception is raised in `func', this function "DOES NOT"
01158  *       protect (catch) the exception.  If you have any resources
01159  *       which should free before throwing exception, you need use
01160  *       rb_protect() in `func' and return a value which represents
01161  *       exception is raised.
01162  *
01163  * NOTE: This functions should not be called by a thread which
01164  *       is not created as Ruby thread (created by Thread.new or so).
01165  *       In other words, this function *DOES NOT* associate
01166  *       NON-Ruby thread to Ruby thread.
01167  */
/* Re-acquire the GVL, run `func(data1)' in the Ruby world, then
 * restore the previous blocking region.  See the commentary above for
 * the full contract.  Must be called from a thread that is a Ruby
 * thread currently inside a blocking region. */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error is occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(1);
    }

    /* remember the active blocking region and unblock callback so they
     * can be re-established when we leave the Ruby world again */
    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
    return r;
}
01200 
01201 /*
01202  * ruby_thread_has_gvl_p - check if current native thread has GVL.
01203  *
01204  ***
01205  *** This API is EXPERIMENTAL!
01206  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01207  ***
01208  */
01209 
01210 int
01211 ruby_thread_has_gvl_p(void)
01212 {
01213     rb_thread_t *th = ruby_thread_from_native();
01214 
01215     if (th && th->blocking_region_buffer == 0) {
01216         return 1;
01217     }
01218     else {
01219         return 0;
01220     }
01221 }
01222 
01223 /*
01224  *  call-seq:
01225  *     Thread.pass   -> nil
01226  *
01227  *  Invokes the thread scheduler to pass execution to another thread.
01228  *
01229  *     a = Thread.new { print "a"; Thread.pass;
01230  *                      print "b"; Thread.pass;
01231  *                      print "c" }
01232  *     b = Thread.new { print "x"; Thread.pass;
01233  *                      print "y"; Thread.pass;
01234  *                      print "z" }
01235  *     a.join
01236  *     b.join
01237  *
01238  *  <em>produces:</em>
01239  *
01240  *     axbycz
01241  */
01242 
/* Thread.pass implementation: yield the processor once; always nil. */
static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}
01249 
01250 /*
01251  *
01252  */
01253 
/*
 * Drain and execute `th''s pending interrupts: queued signals,
 * exceptions thrown from other threads (Thread#raise / #kill),
 * deferred finalizers, and timer-driven rescheduling.
 * interrupt_flag bits: 0x01 = timer interrupt, 0x04 = finalizer
 * interrupt (per the masks below).  `sched_depth' != 0 suppresses the
 * timer-reschedule path to avoid unbounded recursion through
 * rb_thread_schedule_rec().
 */
static void
rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
{
    if (GET_VM()->main_thread == th) {
        /* let the timer thread transfer buffered signals to us first */
        while (rb_signal_buff_size() && !th->exec_signal) native_thread_yield();
    }

    if (th->raised_flag) return;

    while (th->interrupt_flag) {
        enum rb_thread_status status = th->status;
        int timer_interrupt = th->interrupt_flag & 0x01;
        int finalizer_interrupt = th->interrupt_flag & 0x04;

        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            th->exec_signal = 0;
            rb_signal_exec(th, sig);
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            /* kill/terminate are delivered as fatal tags, not raised */
            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (!sched_depth && timer_interrupt) {
            sched_depth++;
            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);

            /* consume the thread's time slice; when exhausted, yield
             * and refill it from the thread priority */
            if (th->slice > 0) {
                th->slice--;
            }
            else {
              reschedule:
                rb_thread_schedule_rec(sched_depth+1);
                if (th->slice < 0) {
                    th->slice++;
                    goto reschedule;
                }
                else {
                    th->slice = th->priority;
                }
            }
        }
    }
}
01319 
/* Public entry point: execute pending interrupts at depth 0. */
void
rb_threadptr_execute_interrupts(rb_thread_t *th)
{
    rb_threadptr_execute_interrupts_rec(th, 0);
}
01325 
/* Obsolete stub kept for link compatibility; thread marking is done
 * elsewhere now. */
void
rb_gc_mark_threads(void)
{
    /* TODO: remove */
}
01331 
01332 /*****************************************************/
01333 
/* Mark `th' ready to run by posting an interrupt to it. */
static void
rb_threadptr_ready(rb_thread_t *th)
{
    rb_threadptr_interrupt(th);
}
01339 
01340 static VALUE
01341 rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
01342 {
01343     VALUE exc;
01344 
01345   again:
01346     if (rb_threadptr_dead(th)) {
01347         return Qnil;
01348     }
01349 
01350     if (th->thrown_errinfo != 0 || th->raised_flag) {
01351         rb_thread_schedule();
01352         goto again;
01353     }
01354 
01355     exc = rb_make_exception(argc, argv);
01356     th->thrown_errinfo = exc;
01357     rb_threadptr_ready(th);
01358     return Qnil;
01359 }
01360 
/* Raise SignalException for signal number `sig'.  Note: the exception
 * is delivered to the VM's main thread (via th->vm), not to `th'
 * itself — signals are always handled on the main thread. */
void
rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01370 
/* Raise SystemExit("exit") on the VM's main thread (e.g. on SIGTERM);
 * like rb_threadptr_signal_raise(), `th' only supplies the VM. */
void
rb_threadptr_signal_exit(rb_thread_t *th)
{
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
01380 
01381 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
01382 #define USE_SIGALTSTACK
01383 #endif
01384 
01385 void
01386 ruby_thread_stack_overflow(rb_thread_t *th)
01387 {
01388     th->raised_flag = 0;
01389 #ifdef USE_SIGALTSTACK
01390     th->raised_flag = 0;
01391     rb_exc_raise(sysstack_error);
01392 #else
01393     th->errinfo = sysstack_error;
01394     TH_JUMP_TAG(th, TAG_RAISE);
01395 #endif
01396 }
01397 
01398 int
01399 rb_threadptr_set_raised(rb_thread_t *th)
01400 {
01401     if (th->raised_flag & RAISED_EXCEPTION) {
01402         return 1;
01403     }
01404     th->raised_flag |= RAISED_EXCEPTION;
01405     return 0;
01406 }
01407 
01408 int
01409 rb_threadptr_reset_raised(rb_thread_t *th)
01410 {
01411     if (!(th->raised_flag & RAISED_EXCEPTION)) {
01412         return 0;
01413     }
01414     th->raised_flag &= ~RAISED_EXCEPTION;
01415     return 1;
01416 }
01417 
/* Intended to wake threads blocked on `fd' when it is closed; not yet
 * implemented here. */
void
rb_thread_fd_close(int fd)
{
    /* TODO: fix me */
}
01423 
01424 /*
01425  *  call-seq:
01426  *     thr.raise
01427  *     thr.raise(string)
01428  *     thr.raise(exception [, string [, array]])
01429  *
01430  *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
01431  *  caller does not have to be <i>thr</i>.
01432  *
01433  *     Thread.abort_on_exception = true
01434  *     a = Thread.new { sleep(200) }
01435  *     a.raise("Gotcha")
01436  *
01437  *  <em>produces:</em>
01438  *
01439  *     prog.rb:3: Gotcha (RuntimeError)
01440  *      from prog.rb:2:in `initialize'
01441  *      from prog.rb:2:in `new'
01442  *      from prog.rb:2
01443  */
01444 
/* Thread#raise implementation (see the call-seq above): build the
 * exception from argv and deliver it to the receiver thread. */
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_threadptr_raise(th, argc, argv);
    return Qnil;
}
01453 
01454 
01455 /*
01456  *  call-seq:
01457  *     thr.exit        -> thr or nil
01458  *     thr.kill        -> thr or nil
01459  *     thr.terminate   -> thr or nil
01460  *
01461  *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
01462  *  is already marked to be killed, <code>exit</code> returns the
01463  *  <code>Thread</code>. If this is the main thread, or the last thread, exits
01464  *  the process.
01465  */
01466 
/* Thread#kill/#exit/#terminate implementation (see call-seq above). */
VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    /* killing another, less-tainted thread requires $SAFE < 4 */
    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    /* already dying or dead: nothing to do */
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    /* killing the main thread exits the whole process */
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);

    /* deliver the kill as a pending pseudo-exception and wake the
     * target so it notices */
    rb_threadptr_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}
01492 
01493 
01494 /*
01495  *  call-seq:
01496  *     Thread.kill(thread)   -> thread
01497  *
01498  *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
01499  *
01500  *     count = 0
01501  *     a = Thread.new { loop { count += 1 } }
01502  *     sleep(0.1)       #=> 0
01503  *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
01504  *     count            #=> 93947
01505  *     a.alive?         #=> false
01506  */
01507 
/* Thread.kill(thread) implementation: delegates to rb_thread_kill(). */
static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}
01513 
01514 
01515 /*
01516  *  call-seq:
01517  *     Thread.exit   -> thread
01518  *
01519  *  Terminates the currently running thread and schedules another thread to be
01520  *  run. If this thread is already marked to be killed, <code>exit</code>
01521  *  returns the <code>Thread</code>. If this is the main thread, or the last
01522  *  thread, exit the process.
01523  */
01524 
/* Thread.exit implementation: kill the calling thread. */
static VALUE
rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}
01530 
01531 
01532 /*
01533  *  call-seq:
01534  *     thr.wakeup   -> thr
01535  *
01536  *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
01537  *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
01538  *
01539  *     c = Thread.new { Thread.stop; puts "hey!" }
01540  *     c.wakeup
01541  *
01542  *  <em>produces:</em>
01543  *
01544  *     hey!
01545  */
01546 
/* Thread#wakeup implementation (see call-seq above): make the thread
 * runnable again without invoking the scheduler. */
VALUE
rb_thread_wakeup(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    rb_threadptr_ready(th);
    /* do not resurrect a thread that is being killed */
    if (th->status != THREAD_TO_KILL) {
        th->status = THREAD_RUNNABLE;
    }
    return thread;
}
01562 
01563 
01564 /*
01565  *  call-seq:
01566  *     thr.run   -> thr
01567  *
01568  *  Wakes up <i>thr</i>, making it eligible for scheduling.
01569  *
01570  *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
01571  *     Thread.pass
01572  *     puts "Got here"
01573  *     a.run
01574  *     a.join
01575  *
01576  *  <em>produces:</em>
01577  *
01578  *     a
01579  *     Got here
01580  *     c
01581  */
01582 
/* Thread#run implementation: wake the thread, then yield the processor
 * so it gets a chance to run. */
VALUE
rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}
01590 
01591 
01592 /*
01593  *  call-seq:
01594  *     Thread.stop   -> nil
01595  *
01596  *  Stops execution of the current thread, putting it into a ``sleep'' state,
01597  *  and schedules execution of another thread.
01598  *
01599  *     a = Thread.new { print "a"; Thread.stop; print "c" }
01600  *     Thread.pass
01601  *     print "b"
01602  *     a.run
01603  *     a.join
01604  *
01605  *  <em>produces:</em>
01606  *
01607  *     abc
01608  */
01609 
/* Thread.stop implementation: sleep forever until another thread wakes
 * us.  Refused when we are the only thread — nobody could wake us. */
VALUE
rb_thread_stop(void)
{
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}
01620 
01621 static int
01622 thread_list_i(st_data_t key, st_data_t val, void *data)
01623 {
01624     VALUE ary = (VALUE)data;
01625     rb_thread_t *th;
01626     GetThreadPtr((VALUE)key, th);
01627 
01628     switch (th->status) {
01629       case THREAD_RUNNABLE:
01630       case THREAD_STOPPED:
01631       case THREAD_STOPPED_FOREVER:
01632       case THREAD_TO_KILL:
01633         rb_ary_push(ary, th->self);
01634       default:
01635         break;
01636     }
01637     return ST_CONTINUE;
01638 }
01639 
01640 /********************************************************************/
01641 
01642 /*
01643  *  call-seq:
01644  *     Thread.list   -> array
01645  *
01646  *  Returns an array of <code>Thread</code> objects for all threads that are
01647  *  either runnable or stopped.
01648  *
01649  *     Thread.new { sleep(200) }
01650  *     Thread.new { 1000000.times {|i| i*i } }
01651  *     Thread.new { Thread.stop }
01652  *     Thread.list.each {|t| p t}
01653  *
01654  *  <em>produces:</em>
01655  *
01656  *     #<Thread:0x401b3e84 sleep>
01657  *     #<Thread:0x401b3f38 run>
01658  *     #<Thread:0x401b3fb0 sleep>
01659  *     #<Thread:0x401bdf4c run>
01660  */
01661 
/* Thread.list implementation (see call-seq above): collect all living
 * threads from the VM's living_threads table. */
VALUE
rb_thread_list(void)
{
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}
01669 
/* Return the Thread object for the currently executing thread. */
VALUE
rb_thread_current(void)
{
    return GET_THREAD()->self;
}
01675 
01676 /*
01677  *  call-seq:
01678  *     Thread.current   -> thread
01679  *
01680  *  Returns the currently executing thread.
01681  *
01682  *     Thread.current   #=> #<Thread:0x401bdf4c run>
01683  */
01684 
/* Thread.current implementation: delegates to rb_thread_current(). */
static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}
01690 
/* Return the Thread object of the VM's main thread. */
VALUE
rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}
01696 
01697 /*
01698  *  call-seq:
01699  *     Thread.main   -> thread
01700  *
01701  *  Returns the main thread.
01702  */
01703 
/* Thread.main implementation: delegates to rb_thread_main(). */
static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}
01709 
01710 
01711 /*
01712  *  call-seq:
01713  *     Thread.abort_on_exception   -> true or false
01714  *
01715  *  Returns the status of the global ``abort on exception'' condition.  The
01716  *  default is <code>false</code>. When set to <code>true</code>, or if the
01717  *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
01718  *  command line option <code>-d</code> was specified) all threads will abort
01719  *  (the process will <code>exit(0)</code>) if an exception is raised in any
01720  *  thread. See also <code>Thread::abort_on_exception=</code>.
01721  */
01722 
/* Thread.abort_on_exception implementation: read the VM-global flag. */
static VALUE
rb_thread_s_abort_exc(void)
{
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}
01728 
01729 
01730 /*
01731  *  call-seq:
01732  *     Thread.abort_on_exception= boolean   -> true or false
01733  *
01734  *  When set to <code>true</code>, all threads will abort if an exception is
01735  *  raised. Returns the new state.
01736  *
01737  *     Thread.abort_on_exception = true
01738  *     t1 = Thread.new do
01739  *       puts  "In new thread"
01740  *       raise "Exception from thread"
01741  *     end
01742  *     sleep(1)
01743  *     puts "not reached"
01744  *
01745  *  <em>produces:</em>
01746  *
01747  *     In new thread
01748  *     prog.rb:4: Exception from thread (RuntimeError)
01749  *      from prog.rb:2:in `initialize'
01750  *      from prog.rb:2:in `new'
01751  *      from prog.rb:2
01752  */
01753 
/* Thread.abort_on_exception= implementation: set the VM-global flag
 * ($SAFE must be below 4). */
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
01761 
01762 
01763 /*
01764  *  call-seq:
01765  *     thr.abort_on_exception   -> true or false
01766  *
01767  *  Returns the status of the thread-local ``abort on exception'' condition for
01768  *  <i>thr</i>. The default is <code>false</code>. See also
01769  *  <code>Thread::abort_on_exception=</code>.
01770  */
01771 
/* Thread#abort_on_exception implementation: read the per-thread flag. */
static VALUE
rb_thread_abort_exc(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}
01779 
01780 
01781 /*
01782  *  call-seq:
01783  *     thr.abort_on_exception= boolean   -> true or false
01784  *
01785  *  When set to <code>true</code>, causes all threads (including the main
01786  *  program) to abort if an exception is raised in <i>thr</i>. The process will
01787  *  effectively <code>exit(0)</code>.
01788  */
01789 
/* Thread#abort_on_exception= implementation: set the per-thread flag
 * ($SAFE must be below 4). */
static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_t *th;
    rb_secure(4);

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}
01800 
01801 
01802 /*
01803  *  call-seq:
01804  *     thr.group   -> thgrp or nil
01805  *
01806  *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
01807  *  the thread is not a member of any group.
01808  *
01809  *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
01810  */
01811 
01812 VALUE
01813 rb_thread_group(VALUE thread)
01814 {
01815     rb_thread_t *th;
01816     VALUE group;
01817     GetThreadPtr(thread, th);
01818     group = th->thgroup;
01819 
01820     if (!group) {
01821         group = Qnil;
01822     }
01823     return group;
01824 }
01825 
01826 static const char *
01827 thread_status_name(enum rb_thread_status status)
01828 {
01829     switch (status) {
01830       case THREAD_RUNNABLE:
01831         return "run";
01832       case THREAD_STOPPED:
01833       case THREAD_STOPPED_FOREVER:
01834         return "sleep";
01835       case THREAD_TO_KILL:
01836         return "aborting";
01837       case THREAD_KILLED:
01838         return "dead";
01839       default:
01840         return "unknown";
01841     }
01842 }
01843 
/* Non-zero iff `th' has terminated. */
static int
rb_threadptr_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}
01849 
01850 
01851 /*
01852  *  call-seq:
01853  *     thr.status   -> string, false or nil
01854  *
01855  *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
01856  *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
01857  *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
01858  *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
01859  *  terminated with an exception.
01860  *
01861  *     a = Thread.new { raise("die now") }
01862  *     b = Thread.new { Thread.stop }
01863  *     c = Thread.new { Thread.exit }
01864  *     d = Thread.new { sleep }
01865  *     d.kill                  #=> #<Thread:0x401b3678 aborting>
01866  *     a.status                #=> nil
01867  *     b.status                #=> "sleep"
01868  *     c.status                #=> false
01869  *     d.status                #=> "aborting"
01870  *     Thread.current.status   #=> "run"
01871  */
01872 
/* Thread#status implementation (see call-seq above): a status string
 * for live threads, nil for a thread killed by an exception, false for
 * normal termination. */
static VALUE
rb_thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th)) {
        /* a non-nil, non-Fixnum errinfo means it died with an exception */
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}
01888 
01889 
01890 /*
01891  *  call-seq:
01892  *     thr.alive?   -> true or false
01893  *
01894  *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
01895  *
01896  *     thr = Thread.new { }
01897  *     thr.join                #=> #<Thread:0x401b3fb0 dead>
01898  *     Thread.current.alive?   #=> true
01899  *     thr.alive?              #=> false
01900  */
01901 
01902 static VALUE
01903 rb_thread_alive_p(VALUE thread)
01904 {
01905     rb_thread_t *th;
01906     GetThreadPtr(thread, th);
01907 
01908     if (rb_threadptr_dead(th))
01909         return Qfalse;
01910     return Qtrue;
01911 }
01912 
01913 /*
01914  *  call-seq:
01915  *     thr.stop?   -> true or false
01916  *
01917  *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
01918  *
01919  *     a = Thread.new { Thread.stop }
01920  *     b = Thread.current
01921  *     a.stop?   #=> true
01922  *     b.stop?   #=> false
01923  */
01924 
01925 static VALUE
01926 rb_thread_stop_p(VALUE thread)
01927 {
01928     rb_thread_t *th;
01929     GetThreadPtr(thread, th);
01930 
01931     if (rb_threadptr_dead(th))
01932         return Qtrue;
01933     if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
01934         return Qtrue;
01935     return Qfalse;
01936 }
01937 
01938 /*
01939  *  call-seq:
01940  *     thr.safe_level   -> integer
01941  *
01942  *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
01943  *  levels can help when implementing sandboxes which run insecure code.
01944  *
01945  *     thr = Thread.new { $SAFE = 3; sleep }
01946  *     Thread.current.safe_level   #=> 0
01947  *     thr.safe_level              #=> 3
01948  */
01949 
/* Thread#safe_level implementation: the thread's $SAFE level. */
static VALUE
rb_thread_safe_level(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}
01958 
01959 /*
01960  * call-seq:
01961  *   thr.inspect   -> string
01962  *
01963  * Dump the name, id, and status of _thr_ to a string.
01964  */
01965 
/* Thread#inspect implementation: "#<ClassName:0xADDR status>".  The
 * result inherits taint/trust from the thread object. */
static VALUE
rb_thread_inspect(VALUE thread)
{
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

    return str;
}
01981 
/* Look up thread-local `id' on `thread'; nil when the storage table
 * does not exist yet or has no such key.  Reading another thread's
 * locals is forbidden at $SAFE >= 4. */
VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    rb_thread_t *th;
    VALUE val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    /* local_storage is created lazily on first assignment */
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return val;
    }
    return Qnil;
}
02000 
02001 /*
02002  *  call-seq:
02003  *      thr[sym]   -> obj or nil
02004  *
02005  *  Attribute Reference---Returns the value of a thread-local variable, using
02006  *  either a symbol or a string name. If the specified variable does not exist,
02007  *  returns <code>nil</code>.
02008  *
02009  *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
02010  *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
02011  *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
02012  *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
02013  *
02014  *  <em>produces:</em>
02015  *
02016  *     #<Thread:0x401b3b3c sleep>: C
02017  *     #<Thread:0x401b3bc8 sleep>: B
02018  *     #<Thread:0x401b3c68 sleep>: A
02019  *     #<Thread:0x401bdf4c run>:
02020  */
02021 
/* Thread#[] implementation: convert the key to an ID and look it up. */
static VALUE
rb_thread_aref(VALUE thread, VALUE id)
{
    return rb_thread_local_aref(thread, rb_to_id(id));
}
02027 
/* Set thread-local `id' = `val' on `thread'.  Assigning nil deletes
 * the entry.  Modifying another thread's locals is forbidden at
 * $SAFE >= 4, and frozen threads reject assignment. */
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    /* create the storage table lazily */
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
02050 
02051 /*
02052  *  call-seq:
02053  *      thr[sym] = obj   -> obj
02054  *
02055  *  Attribute Assignment---Sets or creates the value of a thread-local variable,
02056  *  using either a symbol or a string. See also <code>Thread#[]</code>.
02057  */
02058 
/* Thread#[]= implementation: convert the key to an ID and store. */
static VALUE
rb_thread_aset(VALUE self, VALUE id, VALUE val)
{
    return rb_thread_local_aset(self, rb_to_id(id), val);
}
02064 
02065 /*
02066  *  call-seq:
02067  *     thr.key?(sym)   -> true or false
02068  *
02069  *  Returns <code>true</code> if the given string (or symbol) exists as a
02070  *  thread-local variable.
02071  *
02072  *     me = Thread.current
02073  *     me[:oliver] = "a"
02074  *     me.key?(:oliver)    #=> true
02075  *     me.key?(:stanley)   #=> false
02076  */
02077 
02078 static VALUE
02079 rb_thread_key_p(VALUE self, VALUE key)
02080 {
02081     rb_thread_t *th;
02082     ID id = rb_to_id(key);
02083 
02084     GetThreadPtr(self, th);
02085 
02086     if (!th->local_storage) {
02087         return Qfalse;
02088     }
02089     if (st_lookup(th->local_storage, id, 0)) {
02090         return Qtrue;
02091     }
02092     return Qfalse;
02093 }
02094 
/* st_foreach callback for Thread#keys: append each storage key (an ID)
 * to +ary+ as a Symbol.  +value+ is unused. */
static int
thread_keys_i(ID key, VALUE value, VALUE ary)
{
    rb_ary_push(ary, ID2SYM(key));
    return ST_CONTINUE;
}
02101 
/* Number of living threads registered in +vm+ (size of the
 * living_threads table). */
static int
vm_living_thread_num(rb_vm_t *vm)
{
    return vm->living_threads->num_entries;
}
02107 
02108 int
02109 rb_thread_alone(void)
02110 {
02111     int num = 1;
02112     if (GET_THREAD()->vm->living_threads) {
02113         num = vm_living_thread_num(GET_THREAD()->vm);
02114         thread_debug("rb_thread_alone: %d\n", num);
02115     }
02116     return num == 1;
02117 }
02118 
02119 /*
02120  *  call-seq:
02121  *     thr.keys   -> array
02122  *
02123  *  Returns an array of the names of the thread-local variables (as Symbols).
02124  *
02125  *     thr = Thread.new do
02126  *       Thread.current[:cat] = 'meow'
02127  *       Thread.current["dog"] = 'woof'
02128  *     end
02129  *     thr.join   #=> #<Thread:0x401b3f10 dead>
02130  *     thr.keys   #=> [:dog, :cat]
02131  */
02132 
/* Thread#keys implementation: collect every thread-local key of +self+
 * into a new array of Symbols (empty array when no storage exists). */
static VALUE
rb_thread_keys(VALUE self)
{
    rb_thread_t *th;
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);
    }
    return ary;
}
02145 
02146 /*
02147  *  call-seq:
02148  *     thr.priority   -> integer
02149  *
02150  *  Returns the priority of <i>thr</i>. Default is inherited from the
02151  *  current thread that created the new thread, or zero for the
02152  *  initial main thread; higher-priority threads will run more frequently
02153  *  than lower-priority threads (but lower-priority threads can also run).
02154  *
02155  *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
02156  *  some platforms.
02157  *
02158  *     Thread.current.priority   #=> 0
02159  */
02160 
/* Thread#priority implementation: return the thread's stored priority
 * as a Fixnum. */
static VALUE
rb_thread_priority(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return INT2NUM(th->priority);
}
02168 
02169 
02170 /*
02171  *  call-seq:
02172  *     thr.priority= integer   -> thr
02173  *
02174  *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
02175  *  will run more frequently than lower-priority threads (but lower-priority
02176  *  threads can also run).
02177  *
02178  *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
02179  *  some platforms.
02180  *
02181  *     count1 = count2 = 0
02182  *     a = Thread.new do
02183  *           loop { count1 += 1 }
02184  *         end
02185  *     a.priority = -1
02186  *
02187  *     b = Thread.new do
02188  *           loop { count2 += 1 }
02189  *         end
02190  *     b.priority = -2
02191  *     sleep 1   #=> 1
02192  *     count1    #=> 622504
02193  *     count2    #=> 5832
02194  */
02195 
02196 static VALUE
02197 rb_thread_priority_set(VALUE thread, VALUE prio)
02198 {
02199     rb_thread_t *th;
02200     int priority;
02201     GetThreadPtr(thread, th);
02202 
02203     rb_secure(4);
02204 
02205 #if USE_NATIVE_THREAD_PRIORITY
02206     th->priority = NUM2INT(prio);
02207     native_thread_apply_priority(th);
02208 #else
02209     priority = NUM2INT(prio);
02210     if (priority > RUBY_THREAD_PRIORITY_MAX) {
02211         priority = RUBY_THREAD_PRIORITY_MAX;
02212     }
02213     else if (priority < RUBY_THREAD_PRIORITY_MIN) {
02214         priority = RUBY_THREAD_PRIORITY_MIN;
02215     }
02216     th->priority = priority;
02217     th->slice = priority;
02218 #endif
02219     return INT2NUM(th->priority);
02220 }
02221 
02222 /* for IO */
02223 
02224 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
02225 
02226 /*
02227  * several Unix platforms support file descriptors bigger than FD_SETSIZE
02228  * in select(2) system call.
02229  *
02230  * - Linux 2.2.12 (?)
02231  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
02232  *   select(2) documents how to allocate fd_set dynamically.
02233  *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
02234  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
02235  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
02236  *   select(2) documents how to allocate fd_set dynamically.
02237  *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
02238  * - HP-UX documents how to allocate fd_set dynamically.
02239  *   http://docs.hp.com/en/B2355-60105/select.2.html
02240  * - Solaris 8 has select_large_fdset
02241  *
02242  * When fd_set is not big enough to hold big file descriptors,
02243  * it should be allocated dynamically.
02244  * Note that this assumes fd_set is structured as bitmap.
02245  *
02246  * rb_fd_init allocates the memory.
02247  * rb_fd_term frees the memory.
02248  * rb_fd_set may re-allocate the bitmap.
02249  *
02250  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
02251  */
02252 
/* Initialize an rb_fdset_t: allocate one fd_set's worth of bitmap and
 * clear it.  maxfd starts at 0; rb_fd_set/rb_fd_resize grow the bitmap
 * on demand for descriptors beyond FD_SETSIZE. */
void
rb_fd_init(volatile rb_fdset_t *fds)
{
    fds->maxfd = 0;
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}
02260 
02261 void
02262 rb_fd_term(rb_fdset_t *fds)
02263 {
02264     if (fds->fdset) xfree(fds->fdset);
02265     fds->maxfd = 0;
02266     fds->fdset = 0;
02267 }
02268 
/* Clear every bit in +fds+ without shrinking the bitmap.  MEMZERO
 * clears the dynamically-grown portion (maxfd bits); FD_ZERO
 * additionally clears the base fd_set region. */
void
rb_fd_zero(rb_fdset_t *fds)
{
    if (fds->fdset) {
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
        FD_ZERO(fds->fdset);
    }
}
02277 
/* Grow the bitmap of +fds+ so descriptor +n+ fits.  Sizes are rounded
 * up to whole fd_mask words and never below sizeof(fd_set); newly
 * added bytes are zeroed so no spurious bits appear.  maxfd tracks one
 * past the largest descriptor ever requested. */
static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}
02293 
/* Set bit +n+ in +fds+, growing the bitmap first if needed.  Unlike a
 * plain FD_SET this accepts descriptors beyond FD_SETSIZE. */
void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}
02300 
02301 void
02302 rb_fd_clr(int n, rb_fdset_t *fds)
02303 {
02304     if (n >= fds->maxfd) return;
02305     FD_CLR(n, fds->fdset);
02306 }
02307 
02308 int
02309 rb_fd_isset(int n, const rb_fdset_t *fds)
02310 {
02311     if (n >= fds->maxfd) return 0;
02312     return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
02313 }
02314 
/* Copy a plain fd_set +src+ (descriptors < +max+) into +dst+,
 * reallocating dst's bitmap to hold it.  At least sizeof(fd_set) bytes
 * are always copied, so src must be a full fd_set. */
void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}
02325 
/* select(2) over rb_fdset_t sets: each non-NULL set is grown to cover
 * descriptors up to n-1 (so the kernel never reads past the bitmap)
 * and its raw fd_set pointer is passed through to select. */
int
rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return select(n, r, w, e, timeout);
}
02344 
02345 #undef FD_ZERO
02346 #undef FD_SET
02347 #undef FD_CLR
02348 #undef FD_ISSET
02349 
02350 #define FD_ZERO(f)      rb_fd_zero(f)
02351 #define FD_SET(i, f)    rb_fd_set(i, f)
02352 #define FD_CLR(i, f)    rb_fd_clr(i, f)
02353 #define FD_ISSET(i, f)  rb_fd_isset(i, f)
02354 
02355 #elif defined(_WIN32)
02356 
/* Win32 variant: winsock fd_sets are socket arrays, not bitmaps.
 * Start with capacity FD_SETSIZE and an empty set. */
void
rb_fd_init(volatile rb_fdset_t *set)
{
    set->capa = FD_SETSIZE;
    set->fdset = ALLOC(fd_set);
    FD_ZERO(set->fdset);
}
02364 
/* Win32 variant: free the socket array and reset to the unallocated
 * state. */
void
rb_fd_term(rb_fdset_t *set)
{
    xfree(set->fdset);
    set->fdset = NULL;
    set->capa = 0;
}
02372 
/* Win32 variant of rb_fd_set: translate the CRT descriptor to its
 * SOCKET handle, skip duplicates, grow the fd_array in FD_SETSIZE
 * increments when full, then append the handle. */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* winsock sets are unordered arrays; avoid double insertion */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        /* round capacity up to the next FD_SETSIZE multiple */
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
02390 
02391 #undef FD_ZERO
02392 #undef FD_SET
02393 #undef FD_CLR
02394 #undef FD_ISSET
02395 
02396 #define FD_ZERO(f)      rb_fd_zero(f)
02397 #define FD_SET(i, f)    rb_fd_set(i, f)
02398 #define FD_CLR(i, f)    rb_fd_clr(i, f)
02399 #define FD_ISSET(i, f)  rb_fd_isset(i, f)
02400 
02401 #endif
02402 
02403 #if defined(__CYGWIN__) || defined(_WIN32)
/* Three-way compare two timevals: negative when a < b, zero when
 * equal, positive when a > b.  Seconds dominate; microseconds break
 * ties. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;

    if (sec_diff != 0) {
        return sec_diff;
    }
    return a->tv_usec - b->tv_usec;
}
02410 
/* Subtract +wait+ from +rest+ in place.  Returns 0 and leaves +rest+
 * untouched when wait >= rest (no time remaining); otherwise stores
 * the difference and returns nonzero iff it is > 0. */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    if (rest->tv_sec < wait->tv_sec) return 0;

    /* borrow whole seconds until the microsecond field is subtractable */
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) return 0;
        rest->tv_sec--;
        rest->tv_usec += 1000000;
    }

    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return !(rest->tv_sec == 0 && rest->tv_usec == 0);
}
02428 #endif
02429 
/*
 * GVL-releasing wrapper around select(2).
 *
 * The original fd_sets are snapshotted so they can be restored before
 * every retry (select clobbers its arguments).  On non-Linux platforms
 * an absolute deadline ("limit") is computed so the remaining timeout
 * can be recomputed after EINTR/ERESTART.  On Cygwin/Win32, where
 * select is not reliably interruptible, the wait is chopped into 100ms
 * polling slices inside BLOCKING_REGION and the thread's interrupt
 * flag is checked between slices.
 */
static int
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    int result, lerrno;
    fd_set UNINITIALIZED_VAR(orig_read);
    fd_set UNINITIALIZED_VAR(orig_write);
    fd_set UNINITIALIZED_VAR(orig_except);

#ifndef linux
    double limit = 0;
    struct timeval wait_rest;
# if defined(__CYGWIN__) || defined(_WIN32)
    struct timeval start_time;
# endif

    if (timeout) {
        /* absolute deadline, as fractional seconds since the epoch */
# if defined(__CYGWIN__) || defined(_WIN32)
        gettimeofday(&start_time, NULL);
        limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
# else
        limit = timeofday();
# endif
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        /* work on a mutable copy; select may modify it */
        wait_rest = *timeout;
        timeout = &wait_rest;
    }
#endif

    /* snapshot inputs so retries can restore them */
    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

  retry:
    lerrno = 0;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        int finish = 0;
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        do {
            /* wait at most 100ms per slice, or less if the caller's
             * remaining timeout is shorter */
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    /* nothing ready in this slice: restore the sets
                     * and account the elapsed time against timeout */
                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                    if (timeout) {
                        struct timeval elapsed;
                        gettimeofday(&elapsed, NULL);
                        subtract_tv(&elapsed, &start_time);
                        gettimeofday(&start_time, NULL);
                        if (!subtract_tv(timeout, &elapsed)) {
                            finish = 1;  /* deadline reached */
                            break;
                        }
                        if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
                    }
                } while (__th->interrupt_flag == 0);
            }, 0, 0);
        } while (result == 0 && !finish);
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            /* interrupted: restore the sets, recompute the remaining
             * timeout from the absolute deadline, and retry */
            if (read) *read = orig_read;
            if (write) *write = orig_write;
            if (except) *except = orig_except;
#ifndef linux
            if (timeout) {
                double d = limit - timeofday();

                wait_rest.tv_sec = (unsigned int)d;
                wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }
#endif
            goto retry;
          default:
            break;
        }
    }
    return result;
}
02535 
/* Block the current thread until +fd+ becomes readable (read != 0) or
 * writable (read == 0).  Raises IOError on a negative fd, fails with a
 * system error if do_select reports one, and returns immediately when
 * this is the only living thread (nothing else can make progress, so
 * the caller's blocking I/O will wait by itself). */
static void
rb_thread_wait_fd_rw(int fd, int read)
{
    int result = 0;
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");

    if (fd < 0) {
        rb_raise(rb_eIOError, "closed stream");
    }
    if (rb_thread_alone()) return;
    /* loop until select reports the fd ready (result > 0) */
    while (result <= 0) {
        rb_fdset_t set;
        rb_fd_init(&set);
        FD_SET(fd, &set);  /* rb_fd_set via the redefined macro */

        if (read) {
            result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
        }
        else {
            result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
        }

        rb_fd_term(&set);

        if (result < 0) {
            rb_sys_fail(0);
        }
    }

    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
}
02567 
/* Public API: block until +fd+ is readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
02573 
/* Public API: block until +fd+ is writable.  Always returns TRUE
 * (errors raise inside rb_thread_wait_fd_rw). */
int
rb_thread_fd_writable(int fd)
{
    rb_thread_wait_fd_rw(fd, 0);
    return TRUE;
}
02580 
02581 int
02582 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
02583                  struct timeval *timeout)
02584 {
02585     if (!read && !write && !except) {
02586         if (!timeout) {
02587             rb_thread_sleep_forever();
02588             return 0;
02589         }
02590         rb_thread_wait_for(*timeout);
02591         return 0;
02592     }
02593     else {
02594         return do_select(max, read, write, except, timeout);
02595     }
02596 }
02597 
02598 
/* Public select() over rb_fdset_t sets.  With no sets this sleeps
 * (forever or for +timeout+) and returns 0.  Otherwise each provided
 * set is resized to cover descriptors up to max-1 and the raw fd_set
 * pointers are handed to do_select. */
int
rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
                    struct timeval *timeout)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;

    if (!read && !write && !except) {
        if (!timeout) {
            rb_thread_sleep_forever();
            return 0;
        }
        rb_thread_wait_for(*timeout);
        return 0;
    }

    if (read) {
        rb_fd_resize(max - 1, read);
        r = rb_fd_ptr(read);
    }
    if (write) {
        rb_fd_resize(max - 1, write);
        w = rb_fd_ptr(write);
    }
    if (except) {
        rb_fd_resize(max - 1, except);
        e = rb_fd_ptr(except);
    }
    return do_select(max, r, w, e, timeout);
}
02628 
02629 
02630 /*
02631  * for GC
02632  */
02633 
#ifdef USE_CONSERVATIVE_STACK_END
/* Publish an approximation of the current machine stack end for the
 * conservative GC: the address of a local variable in this frame. */
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    *stack_end_p = &stack_end;
}
#endif
02642 
/* Save the machine context of +th+ so the GC can scan it: flush
 * register windows (SPARC), record the register-stack end on IA-64,
 * and dump callee-saved registers into th->machine_regs via setjmp. */
void
rb_gc_save_machine_context(rb_thread_t *th)
{
    FLUSH_REGISTER_WINDOWS;
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif
    setjmp(th->machine_regs);
}
02652 
02653 /*
02654  *
02655  */
02656 
02657 int rb_get_next_signal(void);
02658 
/* Deliver a pending OS signal to the main thread (+mth+ must be the
 * main thread).  If no signal is already queued on it and one is
 * pending, stash the signal number in exec_signal and interrupt the
 * thread, temporarily marking it runnable so the interrupt is seen. */
void
rb_threadptr_check_signal(rb_thread_t *mth)
{
    int sig;

    /* mth must be main_thread */

    if (!mth->exec_signal && (sig = rb_get_next_signal()) > 0) {
        enum rb_thread_status prev_status = mth->status;
        thread_debug("main_thread: %s, sig: %d\n",
                     thread_status_name(prev_status), sig);
        mth->exec_signal = sig;
        /* wake a sleeping main thread so it processes the signal */
        if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
        rb_threadptr_interrupt(mth);
        mth->status = prev_status;
    }
}
02676 
/* Body of the periodic timer thread: request a time-slice interrupt on
 * the currently running thread and forward any pending OS signal to
 * the main thread.  The #if 0 section is a disabled profiling probe. */
static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    /* check signal */
    rb_threadptr_check_signal(vm->main_thread);

#if 0
    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
#endif
}
02699 
/* Stop the timer thread if it is running, then reset its bookkeeping
 * so it can be started again later. */
void
rb_thread_stop_timer_thread(void)
{
    if (timer_thread_id && native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
}
02707 
/* Reset the timer thread's native bookkeeping (e.g. after fork). */
void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}
02713 
/* Mark the system as live and (re)create the native timer thread. */
void
rb_thread_start_timer_thread(void)
{
    system_working = 1;
    rb_thread_create_timer_thread();
}
02720 
02721 static int
02722 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
02723 {
02724     int i;
02725     VALUE lines = (VALUE)val;
02726 
02727     for (i = 0; i < RARRAY_LEN(lines); i++) {
02728         if (RARRAY_PTR(lines)[i] != Qnil) {
02729             RARRAY_PTR(lines)[i] = INT2FIX(0);
02730         }
02731     }
02732     return ST_CONTINUE;
02733 }
02734 
/* Zero all collected coverage counters (used after fork so the child
 * starts with fresh counts). */
static void
clear_coverage(void)
{
    extern VALUE rb_get_coverages(void);
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {
        st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
    }
}
02744 
/* Common post-fork fixup: the calling thread becomes the child's main
 * thread.  The GVL is reinitialized (fork may leave it locked by a
 * thread that no longer exists), +atfork+ is run over every previously
 * living thread to clean it up, the living-thread table is rebuilt
 * with only the caller, and sleeper/coverage state is reset. */
static void
rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    native_mutex_reinitialize_atfork(&th->vm->global_vm_lock);
    st_foreach(vm->living_threads, atfork, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
}
02760 
/* atfork callback for rb_thread_atfork: every thread except the
 * surviving one (+current_th+) abandons any mutexes it held and is
 * torn down, since only the forking thread exists in the child. */
static int
terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    if (th != (rb_thread_t *)current_th) {
        if (th->keeping_mutexes) {
            rb_mutex_abandon_all(th->keeping_mutexes);
        }
        th->keeping_mutexes = NULL;
        thread_cleanup_func(th);
    }
    return ST_CONTINUE;
}
02777 
/* Public post-fork hook: terminate all other threads, drop any join
 * waiters on the survivor, and reseed the PRNG so parent and child do
 * not share a random stream. */
void
rb_thread_atfork(void)
{
    rb_thread_atfork_internal(terminate_atfork_i);
    GET_THREAD()->join_list_head = 0;
    rb_reset_random_seed();
}
02785 
02786 static int
02787 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
02788 {
02789     VALUE thval = key;
02790     rb_thread_t *th;
02791     GetThreadPtr(thval, th);
02792 
02793     if (th != (rb_thread_t *)current_th) {
02794         thread_cleanup_func_before_exec(th);
02795     }
02796     return ST_CONTINUE;
02797 }
02798 
/* Post-fork hook used on the fork+exec path: minimal thread cleanup
 * only, because exec follows immediately. */
void
rb_thread_atfork_before_exec(void)
{
    rb_thread_atfork_internal(terminate_atfork_before_exec_i);
}
02804 
/* Internal state of a ThreadGroup object. */
struct thgroup {
    int enclosed;   /* nonzero once ThreadGroup#enclose has been called */
    VALUE group;    /* back-reference to the wrapping ThreadGroup VALUE */
};
02809 
02810 static size_t
02811 thgroup_memsize(const void *ptr)
02812 {
02813     return ptr ? sizeof(struct thgroup) : 0;
02814 }
02815 
/* Typed-data descriptor for ThreadGroup: no mark function (the struct
 * holds only a self-reference), default free, size via
 * thgroup_memsize. */
static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,
};
02820 
02821 /*
02822  * Document-class: ThreadGroup
02823  *
02824  *  <code>ThreadGroup</code> provides a means of keeping track of a number of
02825  *  threads as a group. A <code>Thread</code> can belong to only one
02826  *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
02827  *  remove it from any previous group.
02828  *
02829  *  Newly created threads belong to the same group as the thread from which they
02830  *  were created.
02831  */
02832 
/* Allocator for ThreadGroup: wrap a fresh struct thgroup (not
 * enclosed) in a typed data object; data->group points back at the
 * wrapper VALUE. */
static VALUE
thgroup_s_alloc(VALUE klass)
{
    VALUE group;
    struct thgroup *data;

    group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 0;
    data->group = group;

    return group;
}
02845 
/* Argument bundle for thgroup_list_i (st_foreach passes one word). */
struct thgroup_list_params {
    VALUE ary;     /* result array being filled */
    VALUE group;   /* group whose members we are collecting */
};
02850 
/* st_foreach callback over living_threads: push each thread whose
 * thgroup matches the requested group onto the result array. */
static int
thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
{
    VALUE thread = (VALUE)key;
    VALUE ary = ((struct thgroup_list_params *)data)->ary;
    VALUE group = ((struct thgroup_list_params *)data)->group;
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->thgroup == group) {
        rb_ary_push(ary, thread);
    }
    return ST_CONTINUE;
}
02865 
02866 /*
02867  *  call-seq:
02868  *     thgrp.list   -> array
02869  *
02870  *  Returns an array of all existing <code>Thread</code> objects that belong to
02871  *  this group.
02872  *
02873  *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
02874  */
02875 
/* ThreadGroup#list implementation: collect all living threads that
 * belong to +group+ into a new array. */
static VALUE
thgroup_list(VALUE group)
{
    VALUE ary = rb_ary_new();
    struct thgroup_list_params param;

    param.ary = ary;
    param.group = group;
    st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
    return ary;
}
02887 
02888 
02889 /*
02890  *  call-seq:
02891  *     thgrp.enclose   -> thgrp
02892  *
02893  *  Prevents threads from being added to or removed from the receiving
02894  *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
02895  *  <code>ThreadGroup</code>.
02896  *
02897  *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
02898  *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
02899  *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
02900  *     tg.add thr
02901  *
02902  *  <em>produces:</em>
02903  *
02904  *     ThreadError: can't move from the enclosed thread group
02905  */
02906 
/* ThreadGroup#enclose implementation: set the enclosed flag so
 * thgroup_add refuses future moves into or out of this group. */
static VALUE
thgroup_enclose(VALUE group)
{
    struct thgroup *data;

    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 1;

    return group;
}
02917 
02918 
02919 /*
02920  *  call-seq:
02921  *     thgrp.enclosed?   -> true or false
02922  *
02923  *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
02924  *  ThreadGroup#enclose.
02925  */
02926 
02927 static VALUE
02928 thgroup_enclosed_p(VALUE group)
02929 {
02930     struct thgroup *data;
02931 
02932     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
02933     if (data->enclosed)
02934         return Qtrue;
02935     return Qfalse;
02936 }
02937 
02938 
02939 /*
02940  *  call-seq:
02941  *     thgrp.add(thread)   -> thgrp
02942  *
02943  *  Adds the given <em>thread</em> to this group, removing it from any other
02944  *  group to which it may have previously belonged.
02945  *
02946  *     puts "Initial group is #{ThreadGroup::Default.list}"
02947  *     tg = ThreadGroup.new
02948  *     t1 = Thread.new { sleep }
02949  *     t2 = Thread.new { sleep }
02950  *     puts "t1 is #{t1}"
02951  *     puts "t2 is #{t2}"
02952  *     tg.add(t1)
02953  *     puts "Initial group now #{ThreadGroup::Default.list}"
02954  *     puts "tg group now #{tg.list}"
02955  *
02956  *  <em>produces:</em>
02957  *
02958  *     Initial group is #<Thread:0x401bdf4c>
02959  *     t1 is #<Thread:0x401b3c90>
02960  *     t2 is #<Thread:0x401b3c18>
02961  *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
02962  *     tg group now #<Thread:0x401b3c90>
02963  */
02964 
/* ThreadGroup#add implementation: move +thread+ into +group+.
 * Raises ThreadError when either the destination or the thread's
 * current group is frozen or enclosed.  Returns nil for a thread with
 * no group (e.g. during teardown), otherwise the destination group. */
static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *th;
    struct thgroup *data;

    rb_secure(4);
    GetThreadPtr(thread, th);

    /* destination must accept new members */
    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    if (!th->thgroup) {
        return Qnil;
    }

    /* source group must allow members to leave */
    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}
02998 
02999 
03000 /*
03001  *  Document-class: Mutex
03002  *
03003  *  Mutex implements a simple semaphore that can be used to coordinate access to
03004  *  shared data from multiple concurrent threads.
03005  *
03006  *  Example:
03007  *
03008  *    require 'thread'
03009  *    semaphore = Mutex.new
03010  *
03011  *    a = Thread.new {
03012  *      semaphore.synchronize {
03013  *        # access shared resource
03014  *      }
03015  *    }
03016  *
03017  *    b = Thread.new {
03018  *      semaphore.synchronize {
03019  *        # access shared resource
03020  *      }
03021  *    }
03022  *
03023  */
03024 
03025 #define GetMutexPtr(obj, tobj) \
03026     TypedData_Get_Struct(obj, mutex_t, &mutex_data_type, tobj)
03027 
03028 static const char *mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th);
03029 
03030 #define mutex_mark NULL
03031 
/* dfree hook for Mutex: if the mutex is still held when GC reclaims
 * it, force-unlock it first (an unlock failure here is a VM bug),
 * then destroy the native lock and condition variable. */
static void
mutex_free(void *ptr)
{
    if (ptr) {
        mutex_t *mutex = ptr;
        if (mutex->th) {
            /* rb_warn("free locked mutex"); */
            const char *err = mutex_unlock(mutex, mutex->th);
            if (err) rb_bug("%s", err);
        }
        native_mutex_destroy(&mutex->lock);
        native_cond_destroy(&mutex->cond);
    }
    ruby_xfree(ptr);
}
03047 
03048 static size_t
03049 mutex_memsize(const void *ptr)
03050 {
03051     return ptr ? sizeof(mutex_t) : 0;
03052 }
03053 
/* Typed-data descriptor for Mutex (mutex_mark is NULL: the struct
 * holds no Ruby object references that need marking). */
static const rb_data_type_t mutex_data_type = {
    "mutex",
    mutex_mark, mutex_free, mutex_memsize,
};
03058 
/* Allocator for Mutex: wrap a zero-initialized mutex_t and initialize
 * its native lock and condition variable. */
static VALUE
mutex_alloc(VALUE klass)
{
    VALUE volatile obj;
    mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, mutex_t, &mutex_data_type, mutex);
    native_mutex_initialize(&mutex->lock);
    native_cond_initialize(&mutex->cond);
    return obj;
}
03070 
03071 /*
03072  *  call-seq:
03073  *     Mutex.new   -> mutex
03074  *
03075  *  Creates a new Mutex
03076  */
/* Mutex#initialize: all real setup happens in mutex_alloc, so this is
 * a no-op that returns self. */
static VALUE
mutex_initialize(VALUE self)
{
    return self;
}
03082 
/* C API: create a new, unlocked Mutex object. */
VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}
03088 
03089 /*
03090  * call-seq:
03091  *    mutex.locked?  -> true or false
03092  *
03093  * Returns +true+ if this lock is currently held by some thread.
03094  */
/* Mutex#locked? implementation: a mutex is locked iff its owner field
 * (mutex->th) is non-NULL. */
VALUE
rb_mutex_locked_p(VALUE self)
{
    mutex_t *mutex;
    GetMutexPtr(self, mutex);
    return mutex->th ? Qtrue : Qfalse;
}
03102 
03103 static void
03104 mutex_locked(rb_thread_t *th, VALUE self)
03105 {
03106     mutex_t *mutex;
03107     GetMutexPtr(self, mutex);
03108 
03109     if (th->keeping_mutexes) {
03110         mutex->next_mutex = th->keeping_mutexes;
03111     }
03112     th->keeping_mutexes = mutex;
03113 }
03114 
03115 /*
03116  * call-seq:
03117  *    mutex.try_lock  -> true or false
03118  *
03119  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
03120  * lock was granted.
03121  */
/* Mutex#try_lock implementation: under the native lock, claim the
 * mutex for the current thread iff it is unowned.  Returns Qtrue on
 * success, Qfalse when some thread already holds it.  Never blocks. */
VALUE
rb_mutex_trylock(VALUE self)
{
    mutex_t *mutex;
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
        mutex->th = GET_THREAD();
        locked = Qtrue;

        /* track ownership on the thread for abandon/cleanup */
        mutex_locked(GET_THREAD(), self);
    }
    native_mutex_unlock(&mutex->lock);

    return locked;
}
03140 
/*
 * Blocking part of Mutex#lock, run outside the GVL.  Waits on the
 * mutex's condition variable until ownership can be claimed (the
 * `(mutex->th = th, 0)` comma trick claims the mutex as the loop-exit
 * side effect).  Returns 0 on clean acquisition, 1 when interrupted by
 * the VM, 2 when +last_thread+ says every other thread is asleep
 * (potential deadlock: bail out and let the caller check).
 */
static int
lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
{
    int interrupted = 0;
#if 0 /* for debug */
    native_thread_yield();
#endif

    native_mutex_lock(&mutex->lock);
    th->transition_for_lock = 0;
    while (mutex->th || (mutex->th = th, 0)) {
        if (last_thread) {
            interrupted = 2;
            break;
        }

        /* cond_waiting/cond_notified bookkeeping pairs with
         * lock_interrupt's broadcast */
        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);
        mutex->cond_notified--;

        if (RUBY_VM_INTERRUPTED(th)) {
            interrupted = 1;
            break;
        }
    }
    th->transition_for_lock = 1;
    native_mutex_unlock(&mutex->lock);

    if (interrupted == 2) native_thread_yield();
#if 0 /* for debug */
    native_thread_yield();
#endif

    return interrupted;
}
03176 
/* Unblock function for Mutex#lock: wake every thread waiting on the
 * mutex's condition variable so they can notice the VM interrupt.
 * Waiter counts are moved from cond_waiting to cond_notified under the
 * native lock. */
static void
lock_interrupt(void *ptr)
{
    mutex_t *mutex = (mutex_t *)ptr;
    native_mutex_lock(&mutex->lock);
    if (mutex->cond_waiting > 0) {
        native_cond_broadcast(&mutex->cond);
        mutex->cond_notified = mutex->cond_waiting;
        mutex->cond_waiting = 0;
    }
    native_mutex_unlock(&mutex->lock);
}
03189 
03190 /*
03191  * call-seq:
03192  *    mutex.lock  -> self
03193  *
03194  * Attempts to grab the lock and waits if it isn't available.
03195  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
03196  */
VALUE
rb_mutex_lock(VALUE self)
{

    /* fast path: try to take the mutex without blocking */
    if (rb_mutex_trylock(self) == Qfalse) {
        mutex_t *mutex;
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        /* re-locking by the owner would block forever */
        if (mutex->th == GET_THREAD()) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->th != th) {
            int interrupted;
            enum rb_thread_status prev_status = th->status;
            int last_thread = 0;
            struct rb_unblock_callback oldubf;

            /* let Thread#kill etc. wake us via lock_interrupt */
            set_unblock_function(th, lock_interrupt, mutex, &oldubf);
            th->status = THREAD_STOPPED_FOREVER;
            th->vm->sleeper++;
            th->locking_mutex = self;
            /* If every living thread is now asleep, this lock attempt may
             * deadlock the VM; lock_func then returns 2 instead of waiting. */
            if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
                last_thread = 1;
            }

            th->transition_for_lock = 1;
            BLOCKING_REGION_CORE({
                interrupted = lock_func(th, mutex, last_thread);
            });
            th->transition_for_lock = 0;
            remove_signal_thread_list(th);
            reset_unblock_function(th, &oldubf);

            th->locking_mutex = Qfalse;
            /* interrupted == 2: we were the last runnable thread and the
             * mutex is still owned -> report a VM-wide deadlock */
            if (mutex->th && interrupted == 2) {
                rb_check_deadlock(th->vm);
            }
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;

            /* lock_func may have claimed the mutex for us */
            if (mutex->th == th) mutex_locked(th, self);

            if (interrupted) {
                RUBY_VM_CHECK_INTS();
            }
        }
    }
    return self;
}
03250 
/*
 * Release MUTEX held by TH.  Returns NULL on success, or a static error
 * message when the mutex is unlocked or owned by another thread.
 * On success the mutex is also unlinked from th->keeping_mutexes.
 */
static const char *
mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;
    mutex_t *th_mutex;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        /* drop ownership and hand the mutex to one waiter, if any */
        mutex->th = 0;
        if (mutex->cond_waiting > 0) {
            /* waiting thread */
            native_cond_signal(&mutex->cond);
            mutex->cond_waiting--;
            mutex->cond_notified++;
        }
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* Unlink the mutex from the thread's singly linked list of held
         * mutexes; it must be present because th owned it. */
        th_mutex = th->keeping_mutexes;
        if (th_mutex == mutex) {
            th->keeping_mutexes = mutex->next_mutex;
        }
        else {
            while (1) {
                mutex_t *tmp_mutex;
                tmp_mutex = th_mutex->next_mutex;
                if (tmp_mutex == mutex) {
                    th_mutex->next_mutex = tmp_mutex->next_mutex;
                    break;
                }
                th_mutex = tmp_mutex;
            }
        }
        mutex->next_mutex = NULL;
    }

    return err;
}
03298 
03299 /*
03300  * call-seq:
03301  *    mutex.unlock    -> self
03302  *
03303  * Releases the lock.
03304  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
03305  */
03306 VALUE
03307 rb_mutex_unlock(VALUE self)
03308 {
03309     const char *err;
03310     mutex_t *mutex;
03311     GetMutexPtr(self, mutex);
03312 
03313     err = mutex_unlock(mutex, GET_THREAD());
03314     if (err) rb_raise(rb_eThreadError, "%s", err);
03315 
03316     return self;
03317 }
03318 
03319 static void
03320 rb_mutex_unlock_all(mutex_t *mutexes, rb_thread_t *th)
03321 {
03322     const char *err;
03323     mutex_t *mutex;
03324 
03325     while (mutexes) {
03326         mutex = mutexes;
03327         /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
03328                 mutexes); */
03329         mutexes = mutex->next_mutex;
03330         err = mutex_unlock(mutex, th);
03331         if (err) rb_bug("invalid keeping_mutexes: %s", err);
03332     }
03333 }
03334 
03335 static void
03336 rb_mutex_abandon_all(mutex_t *mutexes)
03337 {
03338     mutex_t *mutex;
03339 
03340     while (mutexes) {
03341         mutex = mutexes;
03342         mutexes = mutex->next_mutex;
03343         mutex->th = 0;
03344         mutex->next_mutex = 0;
03345     }
03346 }
03347 
/* rb_ensure body for Mutex#sleep with no timeout: sleep until woken.
 * `time' is unused; the signature matches rb_ensure's callback type. */
static VALUE
rb_mutex_sleep_forever(VALUE time)
{
    rb_thread_sleep_deadly();
    return Qnil;
}
03354 
/* rb_ensure body for Mutex#sleep with a timeout: `time' smuggles a
 * struct timeval pointer through the VALUE parameter. */
static VALUE
rb_mutex_wait_for(VALUE time)
{
    const struct timeval *t = (struct timeval *)time;
    rb_thread_wait_for(*t);
    return Qnil;
}
03362 
03363 VALUE
03364 rb_mutex_sleep(VALUE self, VALUE timeout)
03365 {
03366     time_t beg, end;
03367     struct timeval t;
03368 
03369     if (!NIL_P(timeout)) {
03370         t = rb_time_interval(timeout);
03371     }
03372     rb_mutex_unlock(self);
03373     beg = time(0);
03374     if (NIL_P(timeout)) {
03375         rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
03376     }
03377     else {
03378         rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
03379     }
03380     end = time(0) - beg;
03381     return INT2FIX(end);
03382 }
03383 
03384 /*
03385  * call-seq:
03386  *    mutex.sleep(timeout = nil)    -> number
03387  *
03388  * Releases the lock and sleeps +timeout+ seconds if it is given and
03389  * non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
03390  * the current thread.
03391  */
/* Mutex#sleep(timeout = nil): parse the optional argument and delegate
 * to rb_mutex_sleep. */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);
}
03400 
03401 /*
03402  * call-seq:
03403  *    mutex.synchronize { ... }    -> result of the block
03404  *
03405  * Obtains a lock, runs the block, and releases the lock when the block
03406  * completes.  See the example under +Mutex+.
03407  */
03408 
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    /* rb_ensure guarantees the unlock even if func raises or throws */
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
03415 
03416 /*
03417  * Document-class: Barrier
03418  */
/* GC mark callback: a Barrier's data pointer is a Mutex VALUE. */
static void
barrier_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
03424 
/* TypedData table for Barrier: mark the wrapped mutex VALUE; no free or
 * size callbacks (the mutex is an ordinary GC-managed object). */
static const rb_data_type_t barrier_data_type = {
    "barrier",
    barrier_mark, 0, 0,
};
03429 
/* Allocate a Barrier instance wrapping a freshly allocated Mutex. */
static VALUE
barrier_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
}
03435 
03436 #define GetBarrierPtr(obj) (VALUE)rb_check_typeddata(obj, &barrier_data_type)
03437 
/* Create a Barrier; it starts out locked by the creating thread, so
 * other threads calling rb_barrier_wait will block until release. */
VALUE
rb_barrier_new(void)
{
    VALUE barrier = barrier_alloc(rb_cBarrier);
    rb_mutex_lock((VALUE)DATA_PTR(barrier));
    return barrier;
}
03445 
/*
 * Block until the barrier is released.  Returns Qtrue when the barrier
 * was still alive after the lock was obtained (the caller then holds the
 * underlying mutex), Qfalse when the barrier is destroyed, missing, or
 * already held by the calling thread.
 */
VALUE
rb_barrier_wait(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    mutex_t *m;

    if (!mutex) return Qfalse;
    GetMutexPtr(mutex, m);
    /* the owner would deadlock waiting on its own barrier */
    if (m->th == GET_THREAD()) return Qfalse;
    rb_mutex_lock(mutex);
    /* DATA_PTR is zeroed by rb_barrier_destroy; non-zero means alive */
    if (DATA_PTR(self)) return Qtrue;
    rb_mutex_unlock(mutex);
    return Qfalse;
}
03460 
/* Release the barrier, waking a waiter.  rb_mutex_unlock raises
 * ThreadError if the calling thread does not hold it. */
VALUE
rb_barrier_release(VALUE self)
{
    return rb_mutex_unlock(GetBarrierPtr(self));
}
03466 
/* Destroy the barrier: clear the data pointer (so rb_barrier_wait will
 * report Qfalse) and release the underlying mutex. */
VALUE
rb_barrier_destroy(VALUE self)
{
    VALUE mutex = GetBarrierPtr(self);
    DATA_PTR(self) = 0;
    return rb_mutex_unlock(mutex);
}
03474 
03475 /* variables for recursive traversals */
03476 static ID recursive_key;
03477 
03478 /*
03479  * Returns the current "recursive list" used to detect recursion.
03480  * This list is a hash table, unique for the current thread and for
03481  * the current __callee__.
03482  */
03483 
03484 static VALUE
03485 recursive_list_access(void)
03486 {
03487     volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
03488     VALUE sym = ID2SYM(rb_frame_this_func());
03489     VALUE list;
03490     if (NIL_P(hash) || TYPE(hash) != T_HASH) {
03491         hash = rb_hash_new();
03492         OBJ_UNTRUST(hash);
03493         rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
03494         list = Qnil;
03495     }
03496     else {
03497         list = rb_hash_aref(hash, sym);
03498     }
03499     if (NIL_P(list) || TYPE(list) != T_HASH) {
03500         list = rb_hash_new();
03501         OBJ_UNTRUST(list);
03502         rb_hash_aset(hash, sym, list);
03503     }
03504     return list;
03505 }
03506 
03507 /*
03508  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
03509  * in the recursion list.
03510  * Assumes the recursion list is valid.
03511  */
03512 
03513 static VALUE
03514 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
03515 {
03516     VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
03517     if (pair_list == Qundef)
03518         return Qfalse;
03519     if (paired_obj_id) {
03520         if (TYPE(pair_list) != T_HASH) {
03521         if (pair_list != paired_obj_id)
03522             return Qfalse;
03523         }
03524         else {
03525         if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
03526             return Qfalse;
03527         }
03528     }
03529     return Qtrue;
03530 }
03531 
03532 /*
03533  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
03534  * For a single obj_id, it sets list[obj_id] to Qtrue.
03535  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
03536  * otherwise list[obj_id] becomes a hash like:
03537  *   {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
03538  * Assumes the recursion list is valid.
03539  */
03540 
03541 static void
03542 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
03543 {
03544     VALUE pair_list;
03545 
03546     if (!paired_obj) {
03547         rb_hash_aset(list, obj, Qtrue);
03548     }
03549     else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
03550         rb_hash_aset(list, obj, paired_obj);
03551     }
03552     else {
03553         if (TYPE(pair_list) != T_HASH){
03554             VALUE other_paired_obj = pair_list;
03555             pair_list = rb_hash_new();
03556             OBJ_UNTRUST(pair_list);
03557             rb_hash_aset(pair_list, other_paired_obj, Qtrue);
03558             rb_hash_aset(list, obj, pair_list);
03559         }
03560         rb_hash_aset(pair_list, paired_obj, Qtrue);
03561     }
03562 }
03563 
03564 /*
03565  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
03566  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
03567  * removed from the hash and no attempt is made to simplify
03568  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
03569  * Assumes the recursion list is valid.
03570  */
03571 
/*
 * Remove OBJ (or the pair <OBJ, PAIRED_OBJ>) from the recursion LIST.
 * Raises TypeError if the expected pair entry is missing (the table is
 * inconsistent).  A pair hash is kept while it still holds other ids.
 */
static void
recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
{
    if (paired_obj) {
        VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
        if (pair_list == Qundef) {
            /* inconsistent table: the matching push is missing */
            VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
            VALUE thrname = rb_inspect(rb_thread_current());
            rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
                     StringValuePtr(symname), StringValuePtr(thrname));
        }
        if (TYPE(pair_list) == T_HASH) {
            rb_hash_delete(pair_list, paired_obj);
            if (!RHASH_EMPTY_P(pair_list)) {
                return; /* keep the hash until it is empty */
            }
        }
    }
    rb_hash_delete(list, obj);
}
03592 
/* Arguments bundled for exec_recursive_i (passed through rb_catch_obj). */
struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int); /* user callback (obj, arg, recursive) */
    VALUE list;   /* per-thread/per-method recursion table */
    VALUE obj;    /* object handed back to func */
    VALUE objid;  /* object id used as the recursion key */
    VALUE pairid; /* id of the paired object, or 0 */
    VALUE arg;    /* opaque user argument */
};
03601 
/*
 * Body for exec_recursive: register the object (pair) in the recursion
 * list, invoke the callback, and always pop the entry again, re-raising
 * any non-local exit afterwards via JUMP_TAG.
 */
static VALUE
exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
{
    VALUE result = Qundef;
    int state;

    recursive_push(p->list, p->objid, p->pairid);
    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*p->func)(p->obj, p->arg, FALSE);
    }
    POP_TAG();
    /* pop before propagating so the list stays balanced on exceptions */
    recursive_pop(p->list, p->objid, p->pairid);
    if (state)
        JUMP_TAG(state);
    return result;
}
03619 
03620 /*
03621  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03622  * current method is called recursively on obj, or on the pair <obj, pairid>
03623  * If outer is 0, then the innermost func will be called with recursive set
03624  * to Qtrue, otherwise the outermost func will be called. In the latter case,
03625  * all inner func are short-circuited by throw.
03626  * Implementation details: the value thrown is the recursive list which is
03627  * proper to the current method and unlikely to be caught anywhere else.
03628  * list[recursive_key] is used as a flag for the outermost call.
03629  */
03630 
/*
 * Core of rb_exec_recursive*: run FUNC on OBJ (optionally paired with
 * PAIRID) with recursion detection.  When OUTER is nonzero, the
 * outermost invocation handles the recursion: inner recursive calls
 * throw the recursion list, which is caught here and converted into a
 * func(obj, arg, TRUE) call at the outermost level.
 */
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access();
    p.objid = rb_obj_id(obj);
    /* recursive_key marks the outermost outer-mode frame in this list */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        if (outer && !outermost) {
            /* unwind to the outermost frame; p.list is its catch tag */
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        VALUE result = Qundef;
        p.func = func;
        p.obj = obj;
        p.pairid = pairid;
        p.arg = arg;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
            recursive_pop(p.list, ID2SYM(recursive_key), 0);
            /* catching p.list means recursion was detected somewhere below */
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            result = exec_recursive_i(0, &p);
        }
        return result;
    }
}
03667 
03668 /*
03669  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03670  * current method is called recursively on obj
03671  */
03672 
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* no pairing (pairid = 0), innermost handling (outer = 0) */
    return exec_recursive(func, obj, 0, arg, 0);
}
03678 
03679 /*
03680  * Calls func(obj, arg, recursive), where recursive is non-zero if the
03681  * current method is called recursively on the ordered pair <obj, paired_obj>
03682  */
03683 
VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    /* key recursion on the ordered pair <obj, paired_obj> */
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}
03689 
03690 /*
03691  * If recursion is detected on the current method and obj, the outermost
03692  * func will be called with (obj, arg, Qtrue). All inner func will be
03693  * short-circuited using throw.
03694  */
03695 
VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* outer = 1: the outermost call handles detected recursion */
    return exec_recursive(func, obj, 0, arg, 1);
}
03701 
03702 /* tracer */
03703 
03704 static rb_event_hook_t *
03705 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
03706 {
03707     rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
03708     hook->func = func;
03709     hook->flag = events;
03710     hook->data = data;
03711     return hook;
03712 }
03713 
03714 static void
03715 thread_reset_event_flags(rb_thread_t *th)
03716 {
03717     rb_event_hook_t *hook = th->event_hooks;
03718     rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
03719 
03720     while (hook) {
03721         flag |= hook->flag;
03722         hook = hook->next;
03723     }
03724     th->event_flags = flag;
03725 }
03726 
03727 static void
03728 rb_threadptr_add_event_hook(rb_thread_t *th,
03729                          rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
03730 {
03731     rb_event_hook_t *hook = alloc_event_hook(func, events, data);
03732     hook->next = th->event_hooks;
03733     th->event_hooks = hook;
03734     thread_reset_event_flags(th);
03735 }
03736 
/* Convert a Thread VALUE to its underlying rb_thread_t pointer. */
static rb_thread_t *
thval2thread_t(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return th;
}
03744 
03745 void
03746 rb_thread_add_event_hook(VALUE thval,
03747                          rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
03748 {
03749     rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
03750 }
03751 
03752 static int
03753 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
03754 {
03755     VALUE thval = key;
03756     rb_thread_t *th;
03757     GetThreadPtr(thval, th);
03758 
03759     if (flag) {
03760         th->event_flags |= RUBY_EVENT_VM;
03761     }
03762     else {
03763         th->event_flags &= (~RUBY_EVENT_VM);
03764     }
03765     return ST_CONTINUE;
03766 }
03767 
/* Set (flag != 0) or clear RUBY_EVENT_VM on every living thread. */
static void
set_threads_event_flags(int flag)
{
    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
}
03773 
03774 static inline void
03775 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
03776 {
03777     for (; hook; hook = hook->next) {
03778         if (flag & hook->flag) {
03779             (*hook->func)(flag, hook->data, self, id, klass);
03780         }
03781     }
03782 }
03783 
/*
 * Fire all event hooks registered for FLAG: first the thread-local
 * hooks, then the VM-global ones.  th->errinfo is saved and restored so
 * hook procs cannot clobber a pending exception.
 */
void
rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
{
    const VALUE errinfo = th->errinfo;
    const rb_event_flag_t wait_event = th->event_flags;

    /* never trace events on the frozen core object */
    if (self == rb_mRubyVMFrozenCore) return;
    if (wait_event & flag) {
        exec_event_hooks(th->event_hooks, flag, self, id, klass);
    }
    if (wait_event & RUBY_EVENT_VM) {
        if (th->vm->event_hooks == NULL) {
            /* global hooks are gone; stop paying for the VM-event bit */
            th->event_flags &= (~RUBY_EVENT_VM);
        }
        else {
            exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
        }
    }
    th->errinfo = errinfo;
}
03804 
03805 void
03806 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
03807 {
03808     rb_event_hook_t *hook = alloc_event_hook(func, events, data);
03809     rb_vm_t *vm = GET_VM();
03810 
03811     hook->next = vm->event_hooks;
03812     vm->event_hooks = hook;
03813 
03814     set_threads_event_flags(1);
03815 }
03816 
03817 static int
03818 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
03819 {
03820     rb_event_hook_t *prev = NULL, *hook = *root, *next;
03821 
03822     while (hook) {
03823         next = hook->next;
03824         if (func == 0 || hook->func == func) {
03825             if (prev) {
03826                 prev->next = hook->next;
03827             }
03828             else {
03829                 *root = hook->next;
03830             }
03831             xfree(hook);
03832         }
03833         else {
03834             prev = hook;
03835         }
03836         hook = next;
03837     }
03838     return -1;
03839 }
03840 
/*
 * Remove hooks matching FUNC (0 = all) from TH and recompute the
 * thread's event-flag mask.  (The "revmove" typo in the name is kept:
 * several callers in this file reference it.)
 */
static int
rb_threadptr_revmove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
{
    int ret = remove_event_hook(&th->event_hooks, func);
    thread_reset_event_flags(th);
    return ret;
}
03848 
/* Public API: remove hooks matching FUNC from the given thread. */
int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_revmove_event_hook(thval2thread_t(thval), func);
}
03854 
03855 int
03856 rb_remove_event_hook(rb_event_hook_func_t func)
03857 {
03858     rb_vm_t *vm = GET_VM();
03859     rb_event_hook_t *hook = vm->event_hooks;
03860     int ret = remove_event_hook(&vm->event_hooks, func);
03861 
03862     if (hook != NULL && vm->event_hooks == NULL) {
03863         set_threads_event_flags(0);
03864     }
03865 
03866     return ret;
03867 }
03868 
03869 static int
03870 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
03871 {
03872     rb_thread_t *th;
03873     GetThreadPtr((VALUE)key, th);
03874     rb_threadptr_revmove_event_hook(th, 0);
03875     return ST_CONTINUE;
03876 }
03877 
03878 void
03879 rb_clear_trace_func(void)
03880 {
03881     st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
03882     rb_remove_event_hook(0);
03883 }
03884 
03885 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
03886 
03887 /*
03888  *  call-seq:
03889  *     set_trace_func(proc)    -> proc
03890  *     set_trace_func(nil)     -> nil
03891  *
03892  *  Establishes _proc_ as the handler for tracing, or disables
03893  *  tracing if the parameter is +nil+. _proc_ takes up
03894  *  to six parameters: an event name, a filename, a line number, an
03895  *  object id, a binding, and the name of a class. _proc_ is
03896  *  invoked whenever an event occurs. Events are: <code>c-call</code>
03897  *  (call a C-language routine), <code>c-return</code> (return from a
03898  *  C-language routine), <code>call</code> (call a Ruby method),
03899  *  <code>class</code> (start a class or module definition),
03900  *  <code>end</code> (finish a class or module definition),
03901  *  <code>line</code> (execute code on a new line), <code>raise</code>
03902  *  (raise an exception), and <code>return</code> (return from a Ruby
03903  *  method). Tracing is disabled within the context of _proc_.
03904  *
03905  *      class Test
03906  *      def test
03907  *        a = 1
03908  *        b = 2
03909  *      end
03910  *      end
03911  *
03912  *      set_trace_func proc { |event, file, line, id, binding, classname|
03913  *         printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
03914  *      }
03915  *      t = Test.new
03916  *      t.test
03917  *
03918  *        line prog.rb:11               false
03919  *      c-call prog.rb:11        new    Class
03920  *      c-call prog.rb:11 initialize   Object
03921  *    c-return prog.rb:11 initialize   Object
03922  *    c-return prog.rb:11        new    Class
03923  *        line prog.rb:12               false
03924  *        call prog.rb:2        test     Test
03925  *        line prog.rb:3        test     Test
03926  *        line prog.rb:4        test     Test
03927  *      return prog.rb:4        test     Test
03928  */
03929 
03930 static VALUE
03931 set_trace_func(VALUE obj, VALUE trace)
03932 {
03933     rb_remove_event_hook(call_trace_func);
03934 
03935     if (NIL_P(trace)) {
03936         return Qnil;
03937     }
03938 
03939     if (!rb_obj_is_proc(trace)) {
03940         rb_raise(rb_eTypeError, "trace_func needs to be Proc");
03941     }
03942 
03943     rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
03944     return trace;
03945 }
03946 
/* Attach TRACE (must be a Proc) as an all-events hook on TH. */
static void
thread_add_trace_func(rb_thread_t *th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
}
03956 
03957 /*
03958  *  call-seq:
03959  *     thr.add_trace_func(proc)    -> proc
03960  *
03961  *  Adds _proc_ as a handler for tracing.
03962  *  See <code>Thread#set_trace_func</code> and +set_trace_func+.
03963  */
03964 
03965 static VALUE
03966 thread_add_trace_func_m(VALUE obj, VALUE trace)
03967 {
03968     rb_thread_t *th;
03969     GetThreadPtr(obj, th);
03970     thread_add_trace_func(th, trace);
03971     return trace;
03972 }
03973 
03974 /*
03975  *  call-seq:
03976  *     thr.set_trace_func(proc)    -> proc
03977  *     thr.set_trace_func(nil)     -> nil
03978  *
03979  *  Establishes _proc_ on _thr_ as the handler for tracing, or
03980  *  disables tracing if the parameter is +nil+.
03981  *  See +set_trace_func+.
03982  */
03983 
03984 static VALUE
03985 thread_set_trace_func_m(VALUE obj, VALUE trace)
03986 {
03987     rb_thread_t *th;
03988     GetThreadPtr(obj, th);
03989     rb_threadptr_revmove_event_hook(th, call_trace_func);
03990 
03991     if (NIL_P(trace)) {
03992         return Qnil;
03993     }
03994     thread_add_trace_func(th, trace);
03995     return trace;
03996 }
03997 
03998 static const char *
03999 get_event_name(rb_event_flag_t event)
04000 {
04001     switch (event) {
04002       case RUBY_EVENT_LINE:
04003         return "line";
04004       case RUBY_EVENT_CLASS:
04005         return "class";
04006       case RUBY_EVENT_END:
04007         return "end";
04008       case RUBY_EVENT_CALL:
04009         return "call";
04010       case RUBY_EVENT_RETURN:
04011         return "return";
04012       case RUBY_EVENT_C_CALL:
04013         return "c-call";
04014       case RUBY_EVENT_C_RETURN:
04015         return "c-return";
04016       case RUBY_EVENT_RAISE:
04017         return "raise";
04018       default:
04019         return "unknown";
04020     }
04021 }
04022 
04023 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
04024 
/* Event data captured by call_trace_func and forwarded, via
 * ruby_suppress_tracing, to the user's trace proc. */
struct call_trace_func_args {
    rb_event_flag_t event; /* which RUBY_EVENT_* fired */
    VALUE proc;            /* the user's trace proc */
    VALUE self;            /* receiver at the event site */
    ID id;                 /* method id (used for C calls/returns) */
    VALUE klass;           /* defining class (used for C calls/returns) */
};
04032 
/*
 * Build the six trace arguments (event, file, line, id, binding,
 * classname) and invoke the user's trace proc.  Runs with tracing
 * suppressed (see ruby_suppress_tracing).  Allocator frames are skipped.
 */
static VALUE
call_trace_proc(VALUE args, int tracing)
{
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    const char *srcfile = rb_sourcefile();
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
    VALUE argv[6];
    int line = rb_sourceline();
    ID id = 0;
    VALUE klass = 0;

    if (p->event == RUBY_EVENT_C_CALL ||
        p->event == RUBY_EVENT_C_RETURN) {
        /* C frames carry their id/klass in the event arguments */
        id = p->id;
        klass = p->klass;
    }
    else {
        /* Ruby frames: read them from the current control frame */
        rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
    }
    /* allocator frames are internal; never expose them to trace procs */
    if (id == ID_ALLOCATOR)
      return Qnil;
    if (klass) {
        if (TYPE(klass) == T_ICLASS) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            /* report the attached object, not the singleton class */
            klass = rb_iv_get(klass, "__attached__");
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
}
04073 
04074 static void
04075 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
04076 {
04077     struct call_trace_func_args args;
04078 
04079     args.event = event;
04080     args.proc = proc;
04081     args.self = self;
04082     args.id = id;
04083     args.klass = klass;
04084     ruby_suppress_tracing(call_trace_proc, (VALUE)&args, FALSE);
04085 }
04086 
/*
 * Call FUNC(arg, tracing) with the current thread's tracing flag set so
 * trace hooks do not recurse.  If tracing is already active and ALWAYS
 * is false, the call is skipped and Qnil returned.  The thread's
 * `raised' flag and `state' are saved around the call and restored
 * afterwards; non-local exits from FUNC are re-raised via JUMP_TAG.
 */
VALUE
ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
{
    rb_thread_t *th = GET_THREAD();
    int state, tracing;
    volatile int raised;
    volatile int outer_state;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {
        return Qnil;
    }
    else {
        th->tracing = 1;
    }

    raised = rb_threadptr_reset_raised(th);
    outer_state = th->state;
    th->state = 0;

    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*func)(arg, tracing);
    }

    /* re-arm the raised flag before POP_TAG so it survives a jump */
    if (raised) {
        rb_threadptr_set_raised(th);
    }
    POP_TAG();

    th->tracing = tracing;
    if (state) {
        JUMP_TAG(state);
    }
    th->state = outer_state;

    return result;
}
04125 
04126 VALUE rb_thread_backtrace(VALUE thval);
04127 
04128 /*
04129  *  call-seq:
04130  *     thr.backtrace    -> array
04131  *
04132  *  Returns the current back trace of the _thr_.
04133  */
04134 
/* Thread#backtrace: delegates to rb_thread_backtrace. */
static VALUE
rb_thread_backtrace_m(VALUE thval)
{
    return rb_thread_backtrace(thval);
}
04140 
04141 /*
04142  *  Document-class: ThreadError
04143  *
04144  *  Raised when an invalid operation is attempted on a thread.
04145  *
04146  *  For example, when no other thread has been started:
04147  *
04148  *     Thread.stop
04149  *
04150  *  <em>raises the exception:</em>
04151  *
04152  *     ThreadError: stopping only thread
04153  */
04154 
04155 /*
04156  *  +Thread+ encapsulates the behavior of a thread of
04157  *  execution, including the main thread of the Ruby script.
04158  *
04159  *  In the descriptions of the methods in this class, the parameter _sym_
04160  *  refers to a symbol, which is either a quoted string or a
04161  *  +Symbol+ (such as <code>:name</code>).
04162  */
04163 
/*
 * Init_Thread: boot-time initialization of the threading subsystem.
 * Registers the Ruby-visible API of Thread, ThreadGroup, Mutex and
 * ThreadError, creates the default thread group, acquires the GVL on
 * behalf of the main thread, and starts the timer thread.
 * Called once from the VM initialization sequence.
 */
void
Init_Thread(void)
{
#undef rb_intern
#define rb_intern(str) rb_intern_const(str) /* intern C string literals as pinned IDs */

    VALUE cThGroup;

    /* Thread class-level methods */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
#if THREAD_DEBUG < 0
    /* Thread.DEBUG / Thread.DEBUG= are only exposed in debug builds */
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif

    /* Thread instance methods */
    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);
    rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    /* ThreadGroup class and its default group */
    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    {
	/* the main thread starts out in ThreadGroup::Default */
	rb_thread_t *th = GET_THREAD();
	th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
	rb_define_const(cThGroup, "Default", th->thgroup);
    }

    /* Mutex class */
    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    /* thread-local key used by Thread#recursive bookkeeping */
    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    /* trace */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    {
	/* main thread setting */
	{
	    /* acquire global vm lock */
	    rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
	    native_mutex_initialize(lp);
	    native_mutex_lock(lp);
	    native_mutex_initialize(&GET_THREAD()->interrupt_lock);
	}
    }

    rb_thread_create_timer_thread();

    /* reference otherwise-unused symbols to silence unused warnings on
     * configurations that do not call them */
    (void)native_mutex_trylock;
    (void)ruby_thread_set_native;
}
04262 
/*
 * Returns nonzero when the calling native thread is registered with the
 * Ruby VM (i.e. it has an associated rb_thread_t), zero otherwise.
 */
int
ruby_native_thread_p(void)
{
    return ruby_thread_from_native() != 0;
}
04270 
04271 static int
04272 check_deadlock_i(st_data_t key, st_data_t val, int *found)
04273 {
04274     VALUE thval = key;
04275     rb_thread_t *th;
04276     GetThreadPtr(thval, th);
04277 
04278     if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
04279         *found = 1;
04280     }
04281     else if (th->locking_mutex) {
04282         mutex_t *mutex;
04283         GetMutexPtr(th->locking_mutex, mutex);
04284 
04285         native_mutex_lock(&mutex->lock);
04286         if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
04287             *found = 1;
04288         }
04289         native_mutex_unlock(&mutex->lock);
04290     }
04291 
04292     return (*found) ? ST_STOP : ST_CONTINUE;
04293 }
04294 
#if 0 /* for debug */
/* Disabled st_foreach() callback that dumps each living thread's status,
 * interrupt flag, lock-transition flag and (if any) the mutex it is
 * blocked on — companion to the #if 0 dump in rb_check_deadlock(). */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
	mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	/* lock so owner/cond_notified are read consistently */
	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", mutex->th, mutex->cond_notified);
	native_mutex_unlock(&mutex->lock);
    }
    else puts("");

    return ST_CONTINUE;
}
#endif
04317 
04318 static void
04319 rb_check_deadlock(rb_vm_t *vm)
04320 {
04321     int found = 0;
04322 
04323     if (vm_living_thread_num(vm) > vm->sleeper) return;
04324     if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
04325 
04326     st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
04327 
04328     if (!found) {
04329         VALUE argv[2];
04330         argv[0] = rb_eFatal;
04331         argv[1] = rb_str_new2("deadlock detected");
04332 #if 0 /* for debug */
04333         printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
04334         st_foreach(vm->living_threads, debug_i, (st_data_t)0);
04335 #endif
04336         vm->sleeper--;
04337         rb_threadptr_raise(vm->main_thread, 2, argv);
04338     }
04339 }
04340 
04341 static void
04342 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
04343 {
04344     VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
04345     if (coverage && RBASIC(coverage)->klass == 0) {
04346         long line = rb_sourceline() - 1;
04347         long count;
04348         if (RARRAY_PTR(coverage)[line] == Qnil) {
04349             rb_bug("bug");
04350         }
04351         count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
04352         if (POSFIXABLE(count)) {
04353             RARRAY_PTR(coverage)[line] = LONG2FIX(count);
04354         }
04355     }
04356 }
04357 
04358 VALUE
04359 rb_get_coverages(void)
04360 {
04361     return GET_VM()->coverages;
04362 }
04363 
04364 void
04365 rb_set_coverages(VALUE coverages)
04366 {
04367     GET_VM()->coverages = coverages;
04368     rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
04369 }
04370 
04371 void
04372 rb_reset_coverages(void)
04373 {
04374     GET_VM()->coverages = Qfalse;
04375     rb_remove_event_hook(update_coverage);
04376 }
04377 

Generated on Wed Aug 10 09:17:14 2011 for Ruby by  doxygen 1.4.7